From a713f85e35ebef641254e521f49e1cd6f6a12be6 Mon Sep 17 00:00:00 2001
From: John Murret
Date: Thu, 31 Mar 2022 09:58:05 -0600
Subject: [PATCH] ACLs Refactor. Bootstrap Token and Snapshot Agent Config in Vault. Pre-configured bootstrap token as k8s secret. (#1128)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Use kube auth method to provision ACL token for the crd controller (#995)
* Use a Consul Kubernetes Auth Method to issue consul-login to mint ACL tokens and consul-logout to clean them up for the CRD controller.
Co-authored-by: Iryna Shustava

* Support storing bootstrap token in Vault (#1061)

* Global auth method (#1075)
• Update server-acl-init to create authmethods in the primary datacenter when the job is run in a secondary datacenter during federation. This authmethod allows us to issue logins for global policies.
• Update the controller workflow in server-acl-init to use this global authmethod when run in a secondary DC.
• Update the mesh-gateway acceptance tests to create proxy defaults in the secondary DC to test that the above behavior works successfully.
• Updated logout to not pass in the partition flag as it is not required.
• Update server acl init tests to migrate from require := require.New(t) to require.xyz(t, ...) patterns.

* Refactor ConnectInject to use authmethods (#1076)
Refactor connect-injector to use the new auth-method workflow when ACLs are enabled so that Kubernetes secrets are not used.

* Sync token acl refactor (#1081)
• Refactor sync-catalog to use the new auth-method workflow when ACLs are enabled so that Kubernetes secrets are not used.
• Create a service account and rolebinding dedicated to the component authmethod so that it no longer piggybacks on the one used by the connect-inject authmethod.

* rename the controller flag (#1089)

* Refactor Consul API Gateway Controller to use AuthMethod workflow. (#1083)
* Refactor Consul API Gateway Controller to use AuthMethod workflow.

* Refactor snapshot agent to use new acl authmethod workflow (#1084)
* refactor snapshot agent to use new acl authmethod workflow.

* Refactor mesh-gateway ACL flow (#1085)
* Refactor mesh-gateway ACL flow

* Fix flakey server-acl-init tests with retries (#1095)
* Fix flakey server-acl-init tests with retries
* Adding retry for flakey server-acl-init enterprise test
* adding missing retry module in server-acl-init enterprise tests

* Update Binding Rule if it exists for the authmethod (#1094)

* Enable ACL Client Token (#1093)
* Refactor ConsulLogin() to return the ACL token in addition to the error.
* Refactor createACLPolicyRoleAndBindingRule to append datacenters for local tokens. Refactor updateOrCreateBindingRule to create the binding rule if there are binding rules but this one does not exist.
* Rename -create-client-token flag to -client
* set additional SANs for consul server load balancer so that client will be able to use the certificate to talk to the load balancers rather than just an individual server.
* Refactor server-acl-init command to create ACL Policy and Rule for the client component so that the client can call ConsulLogin and receive an ACL token.
* Enable client to talk to Consul Server to perform consul login.
* Pass Auth Method to k8s acl-init command.
* Configure Consul address to be the Consul Server Load Balancer.
* Configure CA Cert volume to be in memory rather than a k8s secret when using vault.
* Set consul/login volume and CONSUL_HTTP_TOKEN_FILE for use during logout.
* Setup prestop command to perform consul logout.
* Configure client-daemonset so that we can utilize the externalServers setting to configure clients to be able to call consul login on a server that is on a different partition.
* Configuring partition-init to remove additional flags and use ones that already exist
* adding missing comma
* fix flakey tests by wrapping asserts in retries a la Iryna
* Adding -use-https flag to client-daemonset.yaml when externalServers are enabled
* Refactoring tests to cover client-acl-init changes
* addressing PR comments
* removing mounted tmpfs for consul-ca-cert when using vault and restoring datacenter logic because of breaking test.
* addressing PR comments and only appending datacenters to a policy when it's a local token, not global tokens.
* completing additional dns names based on PR feedback
* Do not mount the ca-cert volume when using vault.
* removing unused flagConsulCACert from partition-init command
* PR Feedback. Removing unused envvars in acl-init container. Changing ConsulLogin to return secretID, error instead of token, error.

* vault: add support for admin partitions (#1098)

* Refactor common.Login (#1101)
* convert function args to a struct
* add some missing tests
* move logic that is only relevant for connect out

* Use bootstrap token from vault to validate exec'ing into consul server (#1116)
Follow up on #1103

* Enable terminating gateways to use ACL Auth Method (#1102)
* Enable terminating gateway policy to be generated via Auth Method
* Filtering out failing portion of test for terminating gateway work
* PR feedback. Fixing tests. Changing naming conventions for policy and roles for terminating gateways.
* Update control-plane/subcommand/server-acl-init/command.go
Co-authored-by: Iryna Shustava
* Update control-plane/subcommand/server-acl-init/command.go
Co-authored-by: Iryna Shustava
* Fixing enterprise tests
* Changing terminating gateway to pass acl-init a -component-name flag in the form of terminating-gateway/RELEASE-NAME-consul-terminating
* fixing acceptance test to recognize that long lived tokens will not exist and we have to update the role.
* Correcting serviceAccount used on deployment
* Making all names have a -ingress-gateway suffix
* Update charts/consul/templates/terminating-gateways-deployment.yaml
Co-authored-by: Iryna Shustava
* Update control-plane/subcommand/server-acl-init/command.go
Co-authored-by: Iryna Shustava
* Update control-plane/subcommand/server-acl-init/command.go
Co-authored-by: Iryna Shustava
* Update control-plane/subcommand/server-acl-init/command.go
Co-authored-by: Iryna Shustava
Co-authored-by: Iryna Shustava

* Enable snapshot agent configuration to be retrieved from vault (#1113)
* Enable snapshot agent configuration to be retrieved from vault
* Adding CHANGELOG entry
* Changing the decoding of snapshot agent config in vault to platform agnostic
* Fixing the way we write the encoded vault secret out to a decoded json file
* Decoding vault secret using consul template function on the vault annotation. Able to remove the bash that decodes the file and changes the extension.
* Update CHANGELOG.md
Co-authored-by: Iryna Shustava
* Update charts/consul/values.yaml
Co-authored-by: Iryna Shustava
* Update charts/consul/values.yaml
Co-authored-by: Iryna Shustava
* Update charts/consul/values.yaml
Co-authored-by: Iryna Shustava
* PR Feedback - change client-snapshot-deployment to only have one vault role entry even when needing to set two vault roles
* PR Feedback - when both snapshot agent and ca roles are specified in vault, it should get the sa role.
* Simplifying conditional for vault role.
Co-authored-by: Iryna Shustava

* Ability to set initial_management token when using k8s secret store. Snapshot agent acceptance tests (#1125)
* Adding an acceptance test for snapshot agent. It currently fails because of a bug with Consul where it does not recognize CONSUL_HTTP_TOKEN. Will need to refactor the test to bootstrap, then create a vault secret with an embedded acl token, then helm upgrade to add the snapshot agent, then assert that a *.snap file is created.
* Adding acceptance test for snapshot agent on vault.
* renaming test and removing extra file
* Move vault test helpers into framework folder so we can use it more easily from other folders.
* Adding snapshot agent test for k8s secret
* Adding ability to set initial_management token when using k8s secrets. Also working acceptance test for snapshot agent on k8s secrets.
* Adding bats tests. Adding envvar for ACL_BOOTSTRAP_TOKEN. Removing volume and volume mounts for bootstrap token.
* Adding CHANGELOG entry for ability to pre-set bootstrap ACL token
* Fixing bats tests
* Update acceptance/framework/consul/helm_cluster.go
Co-authored-by: Thomas Eckert
* Fixing broken unit tests
* Lowering snapshot interval from 1m to 15s for tests
* Update acceptance/framework/consul/helm_cluster.go
Co-authored-by: Nitya Dhanushkodi
* Update acceptance/framework/vault/helpers.go
Co-authored-by: Nitya Dhanushkodi
* Update acceptance/tests/snapshot-agent/snapshot_agent_vault_test.go
Co-authored-by: Nitya Dhanushkodi
* PR Feedback - clarify comments on Vault helper functions
* Modifying tests to not incidentally send an encoded file
* Removing logging of the token in acceptance test code.
Co-authored-by: Thomas Eckert
Co-authored-by: Nitya Dhanushkodi

* Enable ingress gateways to use ACL Auth Method (#1118)
* Enable ingress gateway policy to be generated via Auth Method
* Making all names have a -ingress-gateway suffix
* Removing duplicate test
* Update acceptance/tests/ingress-gateway/ingress_gateway_namespaces_test.go
Co-authored-by: Nitya Dhanushkodi
Co-authored-by: Nitya Dhanushkodi

* Removing the gateway type suffix from the naming conventions for terminating and ingress gateways (#1120)
* Removing the gateway type suffix from the naming conventions for terminating and ingress gateways
* Adding check for duplicate terminating gateway and ingress gateway names
* Update charts/consul/templates/ingress-gateways-deployment.yaml
Co-authored-by: Luke Kysow <1034429+lkysow@users.noreply.github.com>
* PR Feedback - adding the duplicate name found to the check failures for duplicate ingress or terminating gateway names
* Fixing rebase conflict
* Merge conflict - duplicate test
* Adding a 10s sleep to flakey snapshot agent tests that were not finding a snapshot in time.
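The thread running through the changes above is that long-lived component ACL tokens stored as Kubernetes secrets are replaced by tokens minted at runtime through a Consul Kubernetes auth method. As a hedged sketch of the server-acl-init side (the names, rules, and selector below are illustrative, not the exact values the command generates), the setup amounts to creating the auth method, a per-component policy and role, and a binding rule that attaches the role to logins from that component's service account:

```go
package sketch

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// setupComponentAuth shows the shape of what server-acl-init does for one component.
// k8sHost/caCert/saJWT come from the Kubernetes cluster; component is e.g. "client",
// "sync-catalog", or "snapshot-agent".
func setupComponentAuth(client *api.Client, k8sHost, caCert, saJWT, component string) error {
	// The Kubernetes auth method validates service account JWTs against the API server.
	authMethod, _, err := client.ACL().AuthMethodCreate(&api.ACLAuthMethod{
		Name: "consul-k8s-component-auth-method", // assumed name
		Type: "kubernetes",
		Config: map[string]interface{}{
			"Host":              k8sHost,
			"CACert":            caCert,
			"ServiceAccountJWT": saJWT,
		},
	}, nil)
	if err != nil {
		return err
	}

	// Placeholder rules; each component gets its own policy content.
	policy, _, err := client.ACL().PolicyCreate(&api.ACLPolicy{
		Name:  component + "-policy",
		Rules: `agent_prefix "" { policy = "read" }`,
	}, nil)
	if err != nil {
		return err
	}

	role, _, err := client.ACL().RoleCreate(&api.ACLRole{
		Name:     component + "-acl-role",
		Policies: []*api.ACLRolePolicyLink{{Name: policy.Name}},
	}, nil)
	if err != nil {
		return err
	}

	// The binding rule ties a successful login from this service account to the role.
	_, _, err = client.ACL().BindingRuleCreate(&api.ACLBindingRule{
		AuthMethod: authMethod.Name,
		BindType:   api.BindingRuleBindTypeRole,
		BindName:   role.Name,
		Selector:   fmt.Sprintf("serviceaccount.name==%q", component),
	}, nil)
	return err
}
```

The secondary-datacenter work in #1075 applies the same idea with an additional global auth method created in the primary datacenter so that logins for global policies can be issued there.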
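On the component side, an init container exchanges the pod's service account JWT for a Consul ACL token at startup, and the preStop hook runs consul logout to delete that token again. A minimal sketch using the Consul API client, assuming the standard projected service account token path and the illustrative auth method name from the previous sketch:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	// The pod's service account JWT at its standard projected path.
	jwt, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
	if err != nil {
		log.Fatal(err)
	}

	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// consul login: the binding rule created by server-acl-init attaches the
	// component's role to the token that comes back.
	token, _, err := client.ACL().Login(&api.ACLLoginParams{
		AuthMethod:  "consul-k8s-component-auth-method", // assumed name
		BearerToken: string(jwt),
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("logged in, token accessor:", token.AccessorID)
	// Per the change list above, the SecretID is written to the consul/login volume and
	// CONSUL_HTTP_TOKEN_FILE points at it so later commands (including logout) can use it.

	// consul logout (what the preStop hook does): deletes the token minted above.
	// Logout must be called with that same token.
	logoutClient, err := api.NewClient(&api.Config{Token: token.SecretID})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := logoutClient.ACL().Logout(nil); err != nil {
		log.Fatal(err)
	}
}
```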
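For the Vault-backed secrets from #1061 and #1113, the acceptance helpers added in this patch write the bootstrap token and the snapshot agent config into the consul/ KV v2 mount and attach read-only policies that the Kubernetes auth roles reference. A condensed sketch of that setup (paths, keys, and policy names follow the helpers and tests in this patch):

```go
package vaultsetup

import (
	vapi "github.com/hashicorp/vault/api"
)

// configureVaultSecrets stores the pre-generated bootstrap token and the snapshot agent
// config (JSON with the token embedded) in Vault and creates the read policies.
func configureVaultSecrets(vaultClient *vapi.Client, bootstrapToken, snapshotConfig string) error {
	// KV v2 writes go through the data/ path; the chart later reads
	// consul/data/secret/bootstrap with key "token".
	if _, err := vaultClient.Logical().Write("consul/data/secret/bootstrap", map[string]interface{}{
		"data": map[string]interface{}{"token": bootstrapToken},
	}); err != nil {
		return err
	}
	// Policy referenced by the server and server-acl-init Kubernetes auth roles.
	if err := vaultClient.Sys().PutPolicy("bootstrap-token",
		`path "consul/data/secret/bootstrap" { capabilities = ["read"] }`); err != nil {
		return err
	}

	// Snapshot agent config, read by the snapshot-agent role with key "config".
	if _, err := vaultClient.Logical().Write("consul/data/secret/snapshot-agent-config", map[string]interface{}{
		"data": map[string]interface{}{"config": snapshotConfig},
	}); err != nil {
		return err
	}
	return vaultClient.Sys().PutPolicy("snapshot-agent-config",
		`path "consul/data/secret/snapshot-agent-config" { capabilities = ["read"] }`)
}
```

The chart is then pointed at these paths with global.acls.bootstrapToken.secretName=consul/data/secret/bootstrap (secretKey token) and client.snapshotAgent.configSecret.secretName=consul/data/secret/snapshot-agent-config (secretKey config), matching the values used in the new snapshot agent acceptance test.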
Co-authored-by: Nitya Dhanushkodi Co-authored-by: Luke Kysow <1034429+lkysow@users.noreply.github.com> Co-authored-by: Kyle Schochenmaier Co-authored-by: Iryna Shustava Co-authored-by: Ashwin Venkatesh Co-authored-by: Thomas Eckert Co-authored-by: Nitya Dhanushkodi Co-authored-by: Luke Kysow <1034429+lkysow@users.noreply.github.com> --- .circleci/config.yml | 4 +- CHANGELOG.md | 3 + acceptance/framework/consul/helm_cluster.go | 42 +- .../{tests => framework}/vault/helpers.go | 140 +- acceptance/go.mod | 2 +- .../ingress_gateway_namespaces_test.go | 18 +- .../ingress-gateway/ingress_gateway_test.go | 15 +- .../tests/mesh-gateway/mesh_gateway_test.go | 39 +- acceptance/tests/snapshot-agent/main_test.go | 15 + .../snapshot_agent_k8s_secret_test.go | 134 ++ .../snapshot_agent_vault_test.go | 141 ++ .../terminating_gateway_namespaces_test.go | 8 +- .../terminating_gateway_test.go | 30 +- .../tests/vault/vault_partitions_test.go | 250 ++ acceptance/tests/vault/vault_test.go | 53 +- acceptance/tests/vault/vault_wan_fed_test.go | 76 +- charts/consul/templates/_helpers.tpl | 14 +- .../api-gateway-controller-deployment.yaml | 155 +- .../templates/auth-method-clusterrole.yaml | 18 + .../auth-method-clusterrolebinding.yaml | 39 + .../templates/auth-method-serviceaccount.yaml | 19 + charts/consul/templates/client-daemonset.yaml | 72 +- .../client-snapshot-agent-deployment.yaml | 296 ++- .../templates/client-snapshot-agent-role.yaml | 27 +- ...-inject-authmethod-clusterrolebinding.yaml | 22 - .../templates/connect-inject-clusterrole.yaml | 24 +- .../connect-inject-clusterrolebinding.yaml | 2 +- .../templates/connect-inject-deployment.yaml | 76 +- .../connect-inject-serviceaccount.yaml | 2 +- .../templates/controller-clusterrole.yaml | 9 - .../templates/controller-deployment.yaml | 75 +- .../ingress-gateways-deployment.yaml | 39 +- .../templates/ingress-gateways-role.yaml | 2 +- .../templates/mesh-gateway-clusterrole.yaml | 9 - .../templates/mesh-gateway-deployment.yaml | 68 +- .../consul/templates/partition-init-job.yaml | 42 +- .../consul/templates/server-acl-init-job.yaml | 61 +- .../templates/server-acl-init-role.yaml | 40 +- .../consul/templates/server-statefulset.yaml | 36 +- .../templates/sync-catalog-clusterrole.yaml | 9 - .../templates/sync-catalog-deployment.yaml | 87 +- .../terminating-gateways-deployment.yaml | 48 +- .../templates/terminating-gateways-role.yaml | 2 +- charts/consul/templates/tls-init-job.yaml | 2 + .../api-gateway-controller-deployment.bats | 294 ++- .../test/unit/auth-method-clusterrole.bats | 20 + .../unit/auth-method-clusterrolebinding.bats | 20 + .../test/unit/auth-method-serviceaccount.bats | 41 + charts/consul/test/unit/client-daemonset.bats | 478 +++- .../client-snapshot-agent-deployment.bats | 388 ++- .../test/unit/client-snapshot-agent-role.bats | 28 - ...-inject-authmethod-clusterrolebinding.bats | 42 - .../test/unit/connect-inject-clusterrole.bats | 14 - .../connect-inject-clusterrolebinding.bats | 2 +- .../test/unit/connect-inject-deployment.bats | 221 +- .../unit/connect-inject-serviceaccount.bats | 1 + .../test/unit/controller-clusterrole.bats | 14 - .../test/unit/controller-deployment.bats | 224 +- charts/consul/test/unit/helpers.bats | 32 +- .../unit/ingress-gateways-deployment.bats | 213 +- .../test/unit/ingress-gateways-role.bats | 2 +- .../test/unit/mesh-gateway-clusterrole.bats | 14 +- .../test/unit/mesh-gateway-deployment.bats | 283 ++- .../consul/test/unit/partition-init-job.bats | 324 ++- .../consul/test/unit/server-acl-init-job.bats | 342 ++- 
.../test/unit/server-acl-init-role.bats | 5 +- .../consul/test/unit/server-statefulset.bats | 106 +- .../test/unit/sync-catalog-clusterrole.bats | 14 - .../test/unit/sync-catalog-deployment.bats | 218 +- .../unit/terminating-gateways-deployment.bats | 190 +- .../test/unit/terminating-gateways-role.bats | 2 +- charts/consul/test/unit/tls-init-job.bats | 31 + charts/consul/values.yaml | 94 +- control-plane/commands.go | 5 + control-plane/helper/test/test_util.go | 85 +- control-plane/subcommand/acl-init/command.go | 172 +- .../subcommand/acl-init/command_test.go | 206 +- control-plane/subcommand/common/common.go | 124 +- .../subcommand/common/common_test.go | 168 +- .../subcommand/connect-init/command.go | 82 +- .../subcommand/consul-logout/command.go | 99 + .../subcommand/consul-logout/command_test.go | 153 ++ .../subcommand/partition-init/command.go | 25 +- .../subcommand/server-acl-init/command.go | 536 +++-- .../server-acl-init/command_ent_test.go | 463 ++-- .../server-acl-init/command_test.go | 2100 +++++++++++------ .../server-acl-init/connect_inject.go | 89 +- .../server-acl-init/connect_inject_test.go | 6 +- .../server-acl-init/create_or_update.go | 188 +- .../server-acl-init/create_or_update_test.go | 8 +- .../subcommand/server-acl-init/servers.go | 43 +- 91 files changed, 8066 insertions(+), 2408 deletions(-) rename acceptance/{tests => framework}/vault/helpers.go (57%) create mode 100644 acceptance/tests/snapshot-agent/main_test.go create mode 100644 acceptance/tests/snapshot-agent/snapshot_agent_k8s_secret_test.go create mode 100644 acceptance/tests/snapshot-agent/snapshot_agent_vault_test.go create mode 100644 acceptance/tests/vault/vault_partitions_test.go create mode 100644 charts/consul/templates/auth-method-clusterrole.yaml create mode 100644 charts/consul/templates/auth-method-clusterrolebinding.yaml create mode 100644 charts/consul/templates/auth-method-serviceaccount.yaml delete mode 100644 charts/consul/templates/connect-inject-authmethod-clusterrolebinding.yaml create mode 100644 charts/consul/test/unit/auth-method-clusterrole.bats create mode 100644 charts/consul/test/unit/auth-method-clusterrolebinding.bats create mode 100644 charts/consul/test/unit/auth-method-serviceaccount.bats delete mode 100644 charts/consul/test/unit/connect-inject-authmethod-clusterrolebinding.bats create mode 100644 control-plane/subcommand/consul-logout/command.go create mode 100644 control-plane/subcommand/consul-logout/command_test.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 99f28279e0..b188f07748 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,8 +9,8 @@ executors: - image: docker.mirror.hashicorp.services/cimg/go:1.17.5 environment: TEST_RESULTS: /tmp/test-results # path to where test results are saved - CONSUL_VERSION: 1.11.2 # Consul's OSS version to use in tests - CONSUL_ENT_VERSION: 1.11.2+ent # Consul's enterprise version to use in tests + CONSUL_VERSION: 1.11.4 # Consul's OSS version to use in tests + CONSUL_ENT_VERSION: 1.11.4+ent # Consul's enterprise version to use in tests control-plane-path: &control-plane-path control-plane cli-path: &cli-path cli diff --git a/CHANGELOG.md b/CHANGELOG.md index bb3a550e10..86d56603ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,9 @@ IMPROVEMENTS: * Upgrade Docker image Alpine version from 3.14 to 3.15. [[GH-1058](https://github.com/hashicorp/consul-k8s/pull/1058)] * Helm * API Gateway: Allow controller to read Kubernetes namespaces in order to determine if route is allowed for gateway. 
[[GH-1092](https://github.com/hashicorp/consul-k8s/pull/1092)] + * Support a pre-configured bootstrap ACL token. [[GH-1125](https://github.com/hashicorp/consul-k8s/pull/1125)] +* Vault + * Enable snapshot agent configuration to be retrieved from vault. [[GH-1113](https://github.com/hashicorp/consul-k8s/pull/1113)] * CLI * Enable users to set up secondary clusters with existing federation secrets. [[GH-1126](https://github.com/hashicorp/consul-k8s/pull/1126)] diff --git a/acceptance/framework/consul/helm_cluster.go b/acceptance/framework/consul/helm_cluster.go index d1f092d5e5..aa169deb65 100644 --- a/acceptance/framework/consul/helm_cluster.go +++ b/acceptance/framework/consul/helm_cluster.go @@ -386,25 +386,7 @@ func configurePodSecurityPolicies(t *testing.T, client kubernetes.Interface, cfg } func createOrUpdateLicenseSecret(t *testing.T, client kubernetes.Interface, cfg *config.TestConfig, namespace string) { - _, err := client.CoreV1().Secrets(namespace).Get(context.Background(), config.LicenseSecretName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - _, err := client.CoreV1().Secrets(namespace).Create(context.Background(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: config.LicenseSecretName, - }, - StringData: map[string]string{ - config.LicenseSecretKey: cfg.EnterpriseLicense, - }, - Type: corev1.SecretTypeOpaque, - }, metav1.CreateOptions{}) - require.NoError(t, err) - } else { - require.NoError(t, err) - } - - helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - _ = client.CoreV1().Secrets(namespace).Delete(context.Background(), config.LicenseSecretName, metav1.DeleteOptions{}) - }) + CreateK8sSecret(t, client, cfg, namespace, config.LicenseSecretName, config.LicenseSecretKey, cfg.EnterpriseLicense) } // configureSCCs creates RoleBindings that bind the default service account to cluster roles @@ -470,3 +452,25 @@ func defaultValues() map[string]string { } return values } + +func CreateK8sSecret(t *testing.T, client kubernetes.Interface, cfg *config.TestConfig, namespace, secretName, secretKey, secret string) { + _, err := client.CoreV1().Secrets(namespace).Get(context.Background(), secretName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + _, err := client.CoreV1().Secrets(namespace).Create(context.Background(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + }, + StringData: map[string]string{ + secretKey: secret, + }, + Type: corev1.SecretTypeOpaque, + }, metav1.CreateOptions{}) + require.NoError(t, err) + } else { + require.NoError(t, err) + } + + helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { + _ = client.CoreV1().Secrets(namespace).Delete(context.Background(), secretName, metav1.DeleteOptions{}) + }) +} diff --git a/acceptance/tests/vault/helpers.go b/acceptance/framework/vault/helpers.go similarity index 57% rename from acceptance/tests/vault/helpers.go rename to acceptance/framework/vault/helpers.go index e24f24b718..bcb9f1df00 100644 --- a/acceptance/tests/vault/helpers.go +++ b/acceptance/framework/vault/helpers.go @@ -18,13 +18,14 @@ const ( path "consul/data/secret/gossip" { capabilities = ["read"] }` - replicationTokenPolicy = ` -path "consul/data/secret/replication" { - capabilities = ["read", "update"] + + tokenPolicyTemplate = ` +path "consul/data/secret/%s" { + capabilities = ["read"] }` enterpriseLicensePolicy = ` -path "consul/data/secret/enterpriselicense" { +path "consul/data/secret/license" { capabilities = ["read"] }` @@ -55,10 +56,15 @@ path "/%s/connect_inter/*" { path "pki/cert/ca" { capabilities = 
["read"] }` + + snapshotAgentPolicy = ` +path "consul/data/secret/snapshot-agent-config" { + capabilities = ["read"] +}` ) -// generateGossipSecret generates a random 32 byte secret returned as a base64 encoded string. -func generateGossipSecret() (string, error) { +// GenerateGossipSecret generates a random 32 byte secret returned as a base64 encoded string. +func GenerateGossipSecret() (string, error) { // This code was copied from Consul's Keygen command: // https://github.com/hashicorp/consul/blob/d652cc86e3d0322102c2b5e9026c6a60f36c17a5/command/keygen/keygen.go @@ -74,16 +80,16 @@ func generateGossipSecret() (string, error) { return base64.StdEncoding.EncodeToString(key), nil } -// configureGossipVaultSecret generates a gossip encryption key, -// stores it in vault as a secret and configures a policy to access it. -func configureGossipVaultSecret(t *testing.T, vaultClient *vapi.Client) string { +// ConfigureGossipVaultSecret generates a gossip encryption key, +// stores it in Vault as a secret and configures a policy to access it. +func ConfigureGossipVaultSecret(t *testing.T, vaultClient *vapi.Client) string { // Create the Vault Policy for the gossip key. logger.Log(t, "Creating gossip policy") - err := vaultClient.Sys().PutPolicy("consul-gossip", gossipPolicy) + err := vaultClient.Sys().PutPolicy("gossip", gossipPolicy) require.NoError(t, err) // Generate the gossip secret. - gossipKey, err := generateGossipSecret() + gossipKey, err := GenerateGossipSecret() require.NoError(t, err) // Create the gossip secret. @@ -99,71 +105,75 @@ func configureGossipVaultSecret(t *testing.T, vaultClient *vapi.Client) string { return gossipKey } -// configureEnterpriseLicenseVaultSecret stores it in vault as a secret and configures a policy to access it. -func configureEnterpriseLicenseVaultSecret(t *testing.T, vaultClient *vapi.Client, cfg *config.TestConfig) { +// ConfigureEnterpriseLicenseVaultSecret stores it in Vault as a secret and configures a policy to access it. +func ConfigureEnterpriseLicenseVaultSecret(t *testing.T, vaultClient *vapi.Client, cfg *config.TestConfig) { // Create the enterprise license secret. logger.Log(t, "Creating the Enterprise License secret") params := map[string]interface{}{ "data": map[string]interface{}{ - "enterpriselicense": cfg.EnterpriseLicense, + "license": cfg.EnterpriseLicense, }, } - _, err := vaultClient.Logical().Write("consul/data/secret/enterpriselicense", params) + _, err := vaultClient.Logical().Write("consul/data/secret/license", params) require.NoError(t, err) - // Create the Vault Policy for the consul-enterpriselicense. - err = vaultClient.Sys().PutPolicy("consul-enterpriselicense", enterpriseLicensePolicy) + err = vaultClient.Sys().PutPolicy("license", enterpriseLicensePolicy) require.NoError(t, err) } -// configureKubernetesAuthRoles configures roles for the Kubernetes auth method -// that will be used by the test Helm chart installation. -func configureKubernetesAuthRoles(t *testing.T, vaultClient *vapi.Client, consulReleaseName, ns, authPath, datacenter string, cfg *config.TestConfig) { - consulClientServiceAccountName := fmt.Sprintf("%s-consul-client", consulReleaseName) - consulServerServiceAccountName := fmt.Sprintf("%s-consul-server", consulReleaseName) - sharedPolicies := "consul-gossip" - if cfg.EnableEnterprise { - sharedPolicies += ",consul-enterpriselicense" +// ConfigureSnapshotAgentSecret stores it in Vault as a secret and configures a policy to access it. 
+func ConfigureSnapshotAgentSecret(t *testing.T, vaultClient *vapi.Client, cfg *config.TestConfig, config string) { + logger.Log(t, "Creating the Snapshot Agent Config secret in Vault") + params := map[string]interface{}{ + "data": map[string]interface{}{ + "config": config, + }, } + _, err := vaultClient.Logical().Write("consul/data/secret/snapshot-agent-config", params) + require.NoError(t, err) + + err = vaultClient.Sys().PutPolicy("snapshot-agent-config", snapshotAgentPolicy) + require.NoError(t, err) +} + +// ConfigureKubernetesAuthRole configures a role in Vault for the component for the Kubernetes auth method +// that will be used by the test Helm chart installation. +func ConfigureKubernetesAuthRole(t *testing.T, vaultClient *vapi.Client, consulReleaseName, ns, authPath, component, policies string) { + componentServiceAccountName := fmt.Sprintf("%s-consul-%s", consulReleaseName, component) - // Create the Auth Roles for consul-server and consul-client. + // Create the Auth Roles for the component. // Auth roles bind policies to Kubernetes service accounts, which // then enables the Vault agent init container to call 'vault login' // with the Kubernetes auth method to obtain a Vault token. // Please see https://www.vaultproject.io/docs/auth/kubernetes#configuration // for more details. - logger.Log(t, "Creating the consul-server and consul-client roles") + logger.Logf(t, "Creating the %q", componentServiceAccountName) params := map[string]interface{}{ - "bound_service_account_names": consulClientServiceAccountName, + "bound_service_account_names": componentServiceAccountName, "bound_service_account_namespaces": ns, - "policies": sharedPolicies, + "policies": policies, "ttl": "24h", } - _, err := vaultClient.Logical().Write(fmt.Sprintf("auth/%s/role/consul-client", authPath), params) - require.NoError(t, err) - - params = map[string]interface{}{ - "bound_service_account_names": consulServerServiceAccountName, - "bound_service_account_namespaces": ns, - "policies": fmt.Sprintf(sharedPolicies+",connect-ca-%s,consul-server-%s,consul-replication-token", datacenter, datacenter), - "ttl": "24h", - } - _, err = vaultClient.Logical().Write(fmt.Sprintf("auth/%s/role/consul-server", authPath), params) + _, err := vaultClient.Logical().Write(fmt.Sprintf("auth/%s/role/%s", authPath, component), params) require.NoError(t, err) +} +// ConfigureConsulCAKubernetesAuthRole configures a role in Vault that allows all service accounts +// within the installation namespace access to the Consul server CA. +func ConfigureConsulCAKubernetesAuthRole(t *testing.T, vaultClient *vapi.Client, ns, authPath string) { // Create the CA role that all components will use to fetch the Server CA certs. - params = map[string]interface{}{ + params := map[string]interface{}{ "bound_service_account_names": "*", "bound_service_account_namespaces": ns, "policies": "consul-ca", "ttl": "24h", } - _, err = vaultClient.Logical().Write(fmt.Sprintf("auth/%s/role/consul-ca", authPath), params) + _, err := vaultClient.Logical().Write(fmt.Sprintf("auth/%s/role/consul-ca", authPath), params) require.NoError(t, err) } -// configurePKICA generates a CA in Vault. -func configurePKICA(t *testing.T, vaultClient *vapi.Client) { +// ConfigurePKICA generates a CA in Vault. +func ConfigurePKICA(t *testing.T, vaultClient *vapi.Client) { // Create root CA to issue Consul server certificates and the `consul-server` PKI role. // See https://learn.hashicorp.com/tutorials/consul/vault-pki-consul-secure-tls. // Generate the root CA. 
@@ -178,9 +188,9 @@ func configurePKICA(t *testing.T, vaultClient *vapi.Client) { require.NoError(t, err) } -// configurePKICertificates configures roles so that Consul server TLS certificates +// ConfigurePKICertificates configures roles in Vault so that Consul server TLS certificates // can be issued by Vault. -func configurePKICertificates(t *testing.T, vaultClient *vapi.Client, consulReleaseName, ns, datacenter string) string { +func ConfigurePKICertificates(t *testing.T, vaultClient *vapi.Client, consulReleaseName, ns, datacenter string) string { // Create the Vault PKI Role. consulServerDNSName := consulReleaseName + "-consul-server" allowedDomains := fmt.Sprintf("%s.consul,%s,%s.%s,%s.%s.svc", datacenter, consulServerDNSName, consulServerDNSName, ns, consulServerDNSName, ns) @@ -193,7 +203,7 @@ func configurePKICertificates(t *testing.T, vaultClient *vapi.Client, consulRele "max_ttl": "1h", } - pkiRoleName := fmt.Sprintf("consul-server-%s", datacenter) + pkiRoleName := fmt.Sprintf("server-cert-%s", datacenter) _, err := vaultClient.Logical().Write(fmt.Sprintf("pki/roles/%s", pkiRoleName), params) require.NoError(t, err) @@ -211,12 +221,14 @@ path %q { return certificateIssuePath } -// configureReplicationTokenVaultSecret generates a replication token secret ID, -// stores it in vault as a secret and configures a policy to access it. -func configureReplicationTokenVaultSecret(t *testing.T, vaultClient *vapi.Client, consulReleaseName, ns string, authMethodPaths ...string) string { - // Create the Vault Policy for the replication token. - logger.Log(t, "Creating replication token policy") - err := vaultClient.Sys().PutPolicy("consul-replication-token", replicationTokenPolicy) +// ConfigureACLTokenVaultSecret generates a token secret ID for a given name, +// stores it in Vault as a secret and configures a policy to access it. +func ConfigureACLTokenVaultSecret(t *testing.T, vaultClient *vapi.Client, tokenName string) string { + // Create the Vault Policy for the token. + logger.Logf(t, "Creating %s token policy", tokenName) + policyName := fmt.Sprintf("%s-token", tokenName) + tokenPolicy := fmt.Sprintf(tokenPolicyTemplate, tokenName) + err := vaultClient.Sys().PutPolicy(policyName, tokenPolicy) require.NoError(t, err) // Generate the token secret. @@ -224,34 +236,20 @@ func configureReplicationTokenVaultSecret(t *testing.T, vaultClient *vapi.Client require.NoError(t, err) // Create the replication token secret. - logger.Log(t, "Creating the replication token secret") + logger.Logf(t, "Creating the %s token secret", tokenName) params := map[string]interface{}{ "data": map[string]interface{}{ - "replication": token, + "token": token, }, } - _, err = vaultClient.Logical().Write("consul/data/secret/replication", params) + _, err = vaultClient.Logical().Write(fmt.Sprintf("consul/data/secret/%s", tokenName), params) require.NoError(t, err) - logger.Log(t, "Creating kubernetes auth role for the server-acl-init job") - serverACLInitSAName := fmt.Sprintf("%s-consul-server-acl-init", consulReleaseName) - params = map[string]interface{}{ - "bound_service_account_names": serverACLInitSAName, - "bound_service_account_namespaces": ns, - "policies": "consul-replication-token", - "ttl": "24h", - } - - for _, authMethodPath := range authMethodPaths { - _, err := vaultClient.Logical().Write(fmt.Sprintf("auth/%s/role/server-acl-init", authMethodPath), params) - require.NoError(t, err) - } - return token } -// createConnectCAPolicy creates the Vault Policy for the connect-ca in a given datacenter. 
-func createConnectCAPolicy(t *testing.T, vaultClient *vapi.Client, datacenter string) { +// CreateConnectCAPolicy creates the Vault Policy for the connect-ca in a given datacenter. +func CreateConnectCAPolicy(t *testing.T, vaultClient *vapi.Client, datacenter string) { err := vaultClient.Sys().PutPolicy( fmt.Sprintf("connect-ca-%s", datacenter), fmt.Sprintf(connectCAPolicyTemplate, datacenter, datacenter)) diff --git a/acceptance/go.mod b/acceptance/go.mod index 3c31530573..82affda09e 100644 --- a/acceptance/go.mod +++ b/acceptance/go.mod @@ -7,6 +7,7 @@ require ( github.com/hashicorp/consul-k8s/control-plane v0.0.0-20211207212234-aea9efea5638 github.com/hashicorp/consul/api v1.12.0 github.com/hashicorp/consul/sdk v0.9.0 + github.com/hashicorp/go-uuid v1.0.2 github.com/hashicorp/vault/api v1.2.0 github.com/stretchr/testify v1.7.0 gopkg.in/yaml.v2 v2.4.0 @@ -49,7 +50,6 @@ require ( github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 // indirect github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect - github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hashicorp/go-version v1.2.0 // indirect github.com/hashicorp/golang-lru v0.5.3 // indirect github.com/hashicorp/hcl v1.0.0 // indirect diff --git a/acceptance/tests/ingress-gateway/ingress_gateway_namespaces_test.go b/acceptance/tests/ingress-gateway/ingress_gateway_namespaces_test.go index f47f98d70f..7e659e424e 100644 --- a/acceptance/tests/ingress-gateway/ingress_gateway_namespaces_test.go +++ b/acceptance/tests/ingress-gateway/ingress_gateway_namespaces_test.go @@ -70,10 +70,11 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { require.NoError(t, err) } + igName := "ingress-gateway" logger.Log(t, "upgrading with ingress gateways enabled") consulCluster.Upgrade(t, map[string]string{ "ingressGateways.enabled": "true", - "ingressGateways.gateways[0].name": "ingress-gateway", + "ingressGateways.gateways[0].name": igName, "ingressGateways.gateways[0].replicas": "1", "ingressGateways.gateways[0].consulNamespace": testNamespace, }) @@ -102,7 +103,7 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { logger.Log(t, "creating config entry") created, _, err := consulClient.ConfigEntries().Set(&api.IngressGatewayConfigEntry{ Kind: api.IngressGateway, - Name: "ingress-gateway", + Name: igName, Namespace: testNamespace, Listeners: []api.IngressListener{ { @@ -120,7 +121,7 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { require.NoError(t, err) require.Equal(t, true, created, "config entry failed") - ingressGatewayService := fmt.Sprintf("http://%s-consul-ingress-gateway.%s:8080/", releaseName, ctx.KubectlOptions(t).Namespace) + ingressGatewayService := fmt.Sprintf("http://%s-consul-%s.%s:8080/", releaseName, igName, ctx.KubectlOptions(t).Namespace) // If ACLs are enabled, test that intentions prevent connections. if c.secure { @@ -138,7 +139,7 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { Namespace: testNamespace, Sources: []*api.SourceIntention{ { - Name: "ingress-gateway", + Name: igName, Namespace: testNamespace, Action: api.IntentionActionAllow, }, @@ -181,6 +182,7 @@ func TestIngressGatewayNamespaceMirroring(t *testing.T) { t.Run(name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) + igName := "ingress" // Install the Helm chart without the ingress gateway first // so that we can create the namespace for it. 
helmValues := map[string]string{ @@ -192,7 +194,7 @@ func TestIngressGatewayNamespaceMirroring(t *testing.T) { "global.tls.enabled": strconv.FormatBool(c.secure), "ingressGateways.enabled": "true", - "ingressGateways.gateways[0].name": "ingress-gateway", + "ingressGateways.gateways[0].name": igName, "ingressGateways.gateways[0].replicas": "1", } @@ -227,7 +229,7 @@ func TestIngressGatewayNamespaceMirroring(t *testing.T) { logger.Log(t, "creating config entry") created, _, err := consulClient.ConfigEntries().Set(&api.IngressGatewayConfigEntry{ Kind: api.IngressGateway, - Name: "ingress-gateway", + Name: igName, Namespace: "default", Listeners: []api.IngressListener{ { @@ -245,7 +247,7 @@ func TestIngressGatewayNamespaceMirroring(t *testing.T) { require.NoError(t, err) require.Equal(t, true, created, "config entry failed") - ingressGatewayService := fmt.Sprintf("http://%s-consul-ingress-gateway.%s:8080/", releaseName, ctx.KubectlOptions(t).Namespace) + ingressGatewayService := fmt.Sprintf("http://%s-consul-%s.%s:8080/", releaseName, igName, ctx.KubectlOptions(t).Namespace) // If ACLs are enabled, test that intentions prevent connections. if c.secure { @@ -263,7 +265,7 @@ func TestIngressGatewayNamespaceMirroring(t *testing.T) { Namespace: testNamespace, Sources: []*api.SourceIntention{ { - Name: "ingress-gateway", + Name: igName, Namespace: "default", Action: api.IntentionActionAllow, }, diff --git a/acceptance/tests/ingress-gateway/ingress_gateway_test.go b/acceptance/tests/ingress-gateway/ingress_gateway_test.go index 359b917a73..9c1fd4abbb 100644 --- a/acceptance/tests/ingress-gateway/ingress_gateway_test.go +++ b/acceptance/tests/ingress-gateway/ingress_gateway_test.go @@ -39,10 +39,11 @@ func TestIngressGateway(t *testing.T) { t.Run(name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) cfg := suite.Config() + igName := "ingress-gateway" helmValues := map[string]string{ "connectInject.enabled": "true", "ingressGateways.enabled": "true", - "ingressGateways.gateways[0].name": "ingress-gateway", + "ingressGateways.gateways[0].name": igName, "ingressGateways.gateways[0].replicas": "1", "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), @@ -70,7 +71,7 @@ func TestIngressGateway(t *testing.T) { // Create config entry created, _, err := consulClient.ConfigEntries().Set(&api.IngressGatewayConfigEntry{ Kind: api.IngressGateway, - Name: "ingress-gateway", + Name: igName, Listeners: []api.IngressListener{ { Port: 8080, @@ -94,7 +95,9 @@ func TestIngressGateway(t *testing.T) { // via the bounce pod. It should fail to connect with the // static-server pod because of intentions. logger.Log(t, "testing intentions prevent ingress") - k8s.CheckStaticServerConnectionFailing(t, k8sOptions, staticClientName, "-H", "Host: static-server.ingress.consul", fmt.Sprintf("http://%s-consul-ingress-gateway:8080/", releaseName)) + k8s.CheckStaticServerConnectionFailing(t, k8sOptions, + staticClientName, "-H", "Host: static-server.ingress.consul", + fmt.Sprintf("http://%s-consul-%s:8080/", releaseName, igName)) // Now we create the allow intention. logger.Log(t, "creating ingress-gateway => static-server intention") @@ -103,7 +106,7 @@ func TestIngressGateway(t *testing.T) { Name: "static-server", Sources: []*api.SourceIntention{ { - Name: "ingress-gateway", + Name: igName, Action: api.IntentionActionAllow, }, }, @@ -114,7 +117,9 @@ func TestIngressGateway(t *testing.T) { // Test that we can make a call to the ingress gateway // via the static-client pod. 
It should route to the static-server pod. logger.Log(t, "trying calls to ingress gateway") - k8s.CheckStaticServerConnectionSuccessful(t, k8sOptions, staticClientName, "-H", "Host: static-server.ingress.consul", fmt.Sprintf("http://%s-consul-ingress-gateway:8080/", releaseName)) + k8s.CheckStaticServerConnectionSuccessful(t, k8sOptions, + staticClientName, "-H", "Host: static-server.ingress.consul", + fmt.Sprintf("http://%s-consul-%s:8080/", releaseName, igName)) }) } } diff --git a/acceptance/tests/mesh-gateway/mesh_gateway_test.go b/acceptance/tests/mesh-gateway/mesh_gateway_test.go index 230f5b01f4..77139aa706 100644 --- a/acceptance/tests/mesh-gateway/mesh_gateway_test.go +++ b/acceptance/tests/mesh-gateway/mesh_gateway_test.go @@ -33,8 +33,9 @@ func TestMeshGatewayDefault(t *testing.T) { "global.federation.enabled": "true", "global.federation.createFederationSecret": "true", - "connectInject.enabled": "true", - "controller.enabled": "true", + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", "meshGateway.enabled": "true", "meshGateway.replicas": "1", @@ -79,7 +80,9 @@ func TestMeshGatewayDefault(t *testing.T) { "server.extraVolumes[0].items[0].key": "serverConfigJSON", "server.extraVolumes[0].items[0].path": "config.json", - "connectInject.enabled": "true", + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", "meshGateway.enabled": "true", "meshGateway.replicas": "1", @@ -164,8 +167,9 @@ func TestMeshGatewaySecure(t *testing.T) { "global.federation.enabled": "true", "global.federation.createFederationSecret": "true", - "connectInject.enabled": "true", - "controller.enabled": "true", + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", "meshGateway.enabled": "true", "meshGateway.replicas": "1", @@ -191,6 +195,19 @@ func TestMeshGatewaySecure(t *testing.T) { _, err = secondaryContext.KubernetesClient(t).CoreV1().Secrets(secondaryContext.KubectlOptions(t).Namespace).Create(context.Background(), federationSecret, metav1.CreateOptions{}) require.NoError(t, err) + var k8sAuthMethodHost string + // When running on kind, the kube API address in kubeconfig will have a localhost address + // which will not work from inside the container. That's why we need to use the endpoints address instead + // which will point the node IP. + if cfg.UseKind { + // The Kubernetes AuthMethod host is read from the endpoints for the Kubernetes service. 
+ kubernetesEndpoint, err := secondaryContext.KubernetesClient(t).CoreV1().Endpoints("default").Get(context.Background(), "kubernetes", metav1.GetOptions{}) + require.NoError(t, err) + k8sAuthMethodHost = fmt.Sprintf("%s:%d", kubernetesEndpoint.Subsets[0].Addresses[0].IP, kubernetesEndpoint.Subsets[0].Ports[0].Port) + } else { + k8sAuthMethodHost = k8s.KubernetesAPIServerHostFromOptions(t, secondaryContext.KubectlOptions(t)) + } + // Create secondary cluster secondaryHelmValues := map[string]string{ "global.datacenter": "dc2", @@ -207,7 +224,9 @@ func TestMeshGatewaySecure(t *testing.T) { "global.acls.replicationToken.secretName": federationSecretName, "global.acls.replicationToken.secretKey": "replicationToken", - "global.federation.enabled": "true", + "global.federation.enabled": "true", + "global.federation.k8sAuthMethodHost": k8sAuthMethodHost, + "global.federation.primaryDatacenter": "dc1", "server.extraVolumes[0].type": "secret", "server.extraVolumes[0].name": federationSecretName, @@ -215,7 +234,9 @@ func TestMeshGatewaySecure(t *testing.T) { "server.extraVolumes[0].items[0].key": "serverConfigJSON", "server.extraVolumes[0].items[0].path": "config.json", - "connectInject.enabled": "true", + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", "meshGateway.enabled": "true", "meshGateway.replicas": "1", @@ -248,9 +269,9 @@ func TestMeshGatewaySecure(t *testing.T) { // gateways. logger.Log(t, "creating proxy-defaults config") kustomizeDir := "../fixtures/bases/mesh-gateway" - k8s.KubectlApplyK(t, primaryContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlApplyK(t, secondaryContext.KubectlOptions(t), kustomizeDir) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, primaryContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlDeleteK(t, secondaryContext.KubectlOptions(t), kustomizeDir) }) // Check that we can connect services over the mesh gateways diff --git a/acceptance/tests/snapshot-agent/main_test.go b/acceptance/tests/snapshot-agent/main_test.go new file mode 100644 index 0000000000..daa389d4c4 --- /dev/null +++ b/acceptance/tests/snapshot-agent/main_test.go @@ -0,0 +1,15 @@ +package snapshotagent + +import ( + "os" + "testing" + + testsuite "github.com/hashicorp/consul-k8s/acceptance/framework/suite" +) + +var suite testsuite.Suite + +func TestMain(m *testing.M) { + suite = testsuite.NewSuite(m) + os.Exit(suite.Run()) +} diff --git a/acceptance/tests/snapshot-agent/snapshot_agent_k8s_secret_test.go b/acceptance/tests/snapshot-agent/snapshot_agent_k8s_secret_test.go new file mode 100644 index 0000000000..4ef08dec6e --- /dev/null +++ b/acceptance/tests/snapshot-agent/snapshot_agent_k8s_secret_test.go @@ -0,0 +1,134 @@ +package snapshotagent + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + terratestLogger "github.com/gruntwork-io/terratest/modules/logger" + "github.com/hashicorp/consul-k8s/acceptance/framework/consul" + "github.com/hashicorp/consul-k8s/acceptance/framework/environment" + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" + "github.com/hashicorp/consul-k8s/acceptance/framework/logger" + "github.com/hashicorp/go-uuid" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TestSnapshotAgent_K8sSecret installs snapshot agent config with an embedded token as a k8s secret. 
+// It then installs Consul with k8s as a secrets backend and verifies that snapshot files +// are generated. +// Currently, the token needs to be embedded in the snapshot agent config due to a Consul +// bug that does not recognize the token for snapshot command being configured via +// a command line arg or an environment variable. +func TestSnapshotAgent_K8sSecret(t *testing.T) { + cfg := suite.Config() + ctx := suite.Environment().DefaultContext(t) + kubectlOptions := ctx.KubectlOptions(t) + ns := kubectlOptions.Namespace + releaseName := helpers.RandomName() + + // Generate a bootstrap token + bootstrapToken, err := uuid.GenerateUUID() + require.NoError(t, err) + + bsSecretName := fmt.Sprintf("%s-acl-bootstrap-token", releaseName) + bsSecretKey := "token" + saSecretName := fmt.Sprintf("%s-snapshot-agent-config", releaseName) + saSecretKey := "token" + + // Create cluster + helmValues := map[string]string{ + "global.tls.enabled": "true", + "global.gossipEncryption.autoGenerate": "true", + "global.acls.manageSystemACLs": "true", + "global.acls.bootstrapToken.secretName": bsSecretName, + "global.acls.bootstrapToken.secretKey": bsSecretKey, + "client.snapshotAgent.enabled": "true", + "client.snapshotAgent.configSecret.secretName": saSecretName, + "client.snapshotAgent.configSecret.secretKey": saSecretKey, + } + + // Get new cluster + consulCluster := consul.NewHelmCluster(t, helmValues, suite.Environment().DefaultContext(t), cfg, releaseName) + client := environment.KubernetesClientFromOptions(t, kubectlOptions) + + // Add bootstrap token secret + logger.Log(t, "Storing bootstrap token as a k8s secret") + consul.CreateK8sSecret(t, client, cfg, ns, bsSecretName, bsSecretKey, bootstrapToken) + + // Add snapshot agent config secret + logger.Log(t, "Storing snapshot agent config as a k8s secret") + config := generateSnapshotAgentConfig(t, bootstrapToken) + logger.Logf(t, "Snapshot agent config: %s", config) + consul.CreateK8sSecret(t, client, cfg, ns, saSecretName, saSecretKey, config) + + // Create cluster + consulCluster.Create(t) + // ---------------------------------- + + // Validate that consul snapshot agent is running correctly and is generating snapshot files + logger.Log(t, "Confirming that Consul Snapshot Agent is generating snapshot files") + // Create k8s client from kubectl options. + + podList, err := client.CoreV1().Pods(kubectlOptions.Namespace).List(context.Background(), + metav1.ListOptions{LabelSelector: fmt.Sprintf("app=consul,component=client-snapshot-agent,release=%s", releaseName)}) + require.NoError(t, err) + require.True(t, len(podList.Items) > 0) + + // Wait for 10seconds to allow snapsot to write. + time.Sleep(10 * time.Second) + + // Loop through snapshot agents. Only one will be the leader and have the snapshot files. 
+ hasSnapshots := false + for _, pod := range podList.Items { + snapshotFileListOutput, err := k8s.RunKubectlAndGetOutputWithLoggerE(t, kubectlOptions, terratestLogger.Discard, "exec", pod.Name, "-c", "consul-snapshot-agent", "--", "ls", "/") + logger.Logf(t, "Snapshot: \n%s", snapshotFileListOutput) + require.NoError(t, err) + if strings.Contains(snapshotFileListOutput, ".snap") { + logger.Logf(t, "Agent pod contains snapshot files") + hasSnapshots = true + break + } else { + logger.Logf(t, "Agent pod does not contain snapshot files") + } + } + require.True(t, hasSnapshots, ".snap") +} + +func generateSnapshotAgentConfig(t *testing.T, token string) string { + config := map[string]interface{}{ + "snapshot_agent": map[string]interface{}{ + "token": token, + "log": map[string]interface{}{ + "level": "INFO", + "enable_syslog": false, + "syslog_facility": "LOCAL0", + }, + "snapshot": map[string]interface{}{ + "interval": "5s", + "retain": 30, + "stale": false, + "service": "consul-snapshot", + "deregister_after": "72h", + "lock_key": "consul-snapshot/lock", + "max_failures": 3, + "local_scratch_path": "", + }, + "local_storage": map[string]interface{}{ + "path": ".", + }, + }, + } + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(config) + require.NoError(t, err) + jsonConfig, err := json.Marshal(&config) + require.NoError(t, err) + return string(jsonConfig) +} diff --git a/acceptance/tests/snapshot-agent/snapshot_agent_vault_test.go b/acceptance/tests/snapshot-agent/snapshot_agent_vault_test.go new file mode 100644 index 0000000000..533ee6aa4b --- /dev/null +++ b/acceptance/tests/snapshot-agent/snapshot_agent_vault_test.go @@ -0,0 +1,141 @@ +package snapshotagent + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + terratestLogger "github.com/gruntwork-io/terratest/modules/logger" + "github.com/hashicorp/consul-k8s/acceptance/framework/consul" + "github.com/hashicorp/consul-k8s/acceptance/framework/environment" + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" + "github.com/hashicorp/consul-k8s/acceptance/framework/logger" + "github.com/hashicorp/consul-k8s/acceptance/framework/vault" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TestSnapshotAgent_Vault installs snapshot agent config with an embedded token as a Vault secret. +// It then installs Consul with Vault as a secrets backend and verifies that snapshot files +// are generated. +// Currently, the token needs to be embedded in the snapshot agent config due to a Consul +// bug that does not recognize the token for snapshot command being configured via +// a command line arg or an environment variable. +func TestSnapshotAgent_Vault(t *testing.T) { + cfg := suite.Config() + ctx := suite.Environment().DefaultContext(t) + kubectlOptions := ctx.KubectlOptions(t) + ns := kubectlOptions.Namespace + + consulReleaseName := helpers.RandomName() + vaultReleaseName := helpers.RandomName() + + vaultCluster := vault.NewVaultCluster(t, ctx, cfg, vaultReleaseName, nil) + vaultCluster.Create(t, ctx) + // Vault is now installed in the cluster. + + // Now fetch the Vault client so we can create the policies and secrets. 
+ vaultClient := vaultCluster.VaultClient(t) + + vault.CreateConnectCAPolicy(t, vaultClient, "dc1") + if cfg.EnableEnterprise { + vault.ConfigureEnterpriseLicenseVaultSecret(t, vaultClient, cfg) + } + + bootstrapToken := vault.ConfigureACLTokenVaultSecret(t, vaultClient, "bootstrap") + + config := generateSnapshotAgentConfig(t, bootstrapToken) + vault.ConfigureSnapshotAgentSecret(t, vaultClient, cfg, config) + + serverPolicies := "gossip,connect-ca-dc1,server-cert-dc1,bootstrap-token" + if cfg.EnableEnterprise { + serverPolicies += ",license" + } + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes", "server", serverPolicies) + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes", "client", "gossip") + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes", "server-acl-init", "bootstrap-token") + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes", "snapshot-agent", "snapshot-agent-config,license") + vault.ConfigureConsulCAKubernetesAuthRole(t, vaultClient, ns, "kubernetes") + + vault.ConfigurePKICA(t, vaultClient) + certPath := vault.ConfigurePKICertificates(t, vaultClient, consulReleaseName, ns, "dc1") + + vaultCASecret := vault.CASecretName(vaultReleaseName) + + consulHelmValues := map[string]string{ + "server.extraVolumes[0].type": "secret", + "server.extraVolumes[0].name": vaultCASecret, + "server.extraVolumes[0].load": "false", + + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", + + "global.secretsBackend.vault.enabled": "true", + "global.secretsBackend.vault.consulServerRole": "server", + "global.secretsBackend.vault.consulClientRole": "client", + "global.secretsBackend.vault.consulCARole": "consul-ca", + "global.secretsBackend.vault.manageSystemACLsRole": "server-acl-init", + + "global.secretsBackend.vault.ca.secretName": vaultCASecret, + "global.secretsBackend.vault.ca.secretKey": "tls.crt", + + "global.secretsBackend.vault.connectCA.address": vaultCluster.Address(), + "global.secretsBackend.vault.connectCA.rootPKIPath": "connect_root", + "global.secretsBackend.vault.connectCA.intermediatePKIPath": "dc1/connect_inter", + + "global.acls.manageSystemACLs": "true", + "global.acls.bootstrapToken.secretName": "consul/data/secret/bootstrap", + "global.acls.bootstrapToken.secretKey": "token", + "global.tls.enabled": "true", + + "server.serverCert.secretName": certPath, + "global.tls.caCert.secretName": "pki/cert/ca", + "global.tls.enableAutoEncrypt": "true", + + "client.snapshotAgent.enabled": "true", + "client.snapshotAgent.configSecret.secretName": "consul/data/secret/snapshot-agent-config", + "client.snapshotAgent.configSecret.secretKey": "config", + "global.secretsBackend.vault.consulSnapshotAgentRole": "snapshot-agent", + } + + if cfg.EnableEnterprise { + consulHelmValues["global.enterpriseLicense.secretName"] = "consul/data/secret/license" + consulHelmValues["global.enterpriseLicense.secretKey"] = "license" + } + + logger.Log(t, "Installing Consul") + consulCluster := consul.NewHelmCluster(t, consulHelmValues, ctx, cfg, consulReleaseName) + consulCluster.Create(t) + + // Validate that consul snapshot agent is running correctly and is generating snapshot files + logger.Log(t, "Confirming that Consul Snapshot Agent is generating snapshot files") + // Create k8s client from kubectl options. 
+  client := environment.KubernetesClientFromOptions(t, kubectlOptions)
+  podList, err := client.CoreV1().Pods(kubectlOptions.Namespace).List(context.Background(),
+    metav1.ListOptions{LabelSelector: fmt.Sprintf("app=consul,component=client-snapshot-agent,release=%s", consulReleaseName)})
+  require.NoError(t, err)
+  require.True(t, len(podList.Items) > 0)
+
+  // Wait for 10 seconds to allow the snapshot agent to write a snapshot.
+  time.Sleep(10 * time.Second)
+
+  // Loop through snapshot agents. Only one will be the leader and have the snapshot files.
+  hasSnapshots := false
+  for _, pod := range podList.Items {
+    snapshotFileListOutput, err := k8s.RunKubectlAndGetOutputWithLoggerE(t, kubectlOptions, terratestLogger.Discard, "exec", pod.Name, "-c", "consul-snapshot-agent", "--", "ls", "/")
+    logger.Logf(t, "Snapshot: \n%s", snapshotFileListOutput)
+    require.NoError(t, err)
+    if strings.Contains(snapshotFileListOutput, ".snap") {
+      logger.Logf(t, "Agent pod contains snapshot files")
+      hasSnapshots = true
+      break
+    } else {
+      logger.Logf(t, "Agent pod does not contain snapshot files")
+    }
+  }
+  require.True(t, hasSnapshots)
+}
diff --git a/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go b/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go
index 76510b9a76..2225746b6f 100644
--- a/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go
+++ b/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go
@@ -97,11 +97,11 @@ func TestTerminatingGatewaySingleNamespace(t *testing.T) {
   // Register the external service.
   registerExternalService(t, consulClient, testNamespace)
 
-  // If ACLs are enabled we need to update the token of the terminating gateway
+  // If ACLs are enabled we need to update the role of the terminating gateway
   // with service:write permissions to the static-server service
   // so that it can can request Connect certificates for it.
   if c.secure {
-    updateTerminatingGatewayToken(t, consulClient, fmt.Sprintf(staticServerPolicyRulesNamespace, testNamespace))
+    updateTerminatingGatewayRole(t, consulClient, fmt.Sprintf(staticServerPolicyRulesNamespace, testNamespace))
   }
 
   // Create the config entry for the terminating gateway.
@@ -205,11 +205,11 @@ func TestTerminatingGatewayNamespaceMirroring(t *testing.T) {
   // Register the external service
   registerExternalService(t, consulClient, testNamespace)
 
-  // If ACLs are enabled we need to update the token of the terminating gateway
+  // If ACLs are enabled we need to update the role of the terminating gateway
   // with service:write permissions to the static-server service
   // so that it can can request Connect certificates for it.
if c.secure { - updateTerminatingGatewayToken(t, consulClient, fmt.Sprintf(staticServerPolicyRulesNamespace, testNamespace)) + updateTerminatingGatewayRole(t, consulClient, fmt.Sprintf(staticServerPolicyRulesNamespace, testNamespace)) } // Create the config entry for the terminating gateway diff --git a/acceptance/tests/terminating-gateway/terminating_gateway_test.go b/acceptance/tests/terminating-gateway/terminating_gateway_test.go index cb362d4445..f87614f3e0 100644 --- a/acceptance/tests/terminating-gateway/terminating_gateway_test.go +++ b/acceptance/tests/terminating-gateway/terminating_gateway_test.go @@ -69,11 +69,11 @@ func TestTerminatingGateway(t *testing.T) { // Register the external service registerExternalService(t, consulClient, "") - // If ACLs are enabled we need to update the token of the terminating gateway + // If ACLs are enabled we need to update the role of the terminating gateway // with service:write permissions to the static-server service // so that it can can request Connect certificates for it. if c.secure { - updateTerminatingGatewayToken(t, consulClient, staticServerPolicyRules) + updateTerminatingGatewayRole(t, consulClient, staticServerPolicyRules) } // Create the config entry for the terminating gateway. @@ -133,32 +133,32 @@ func registerExternalService(t *testing.T, consulClient *api.Client, namespace s require.NoError(t, err) } -func updateTerminatingGatewayToken(t *testing.T, consulClient *api.Client, rules string) { +func updateTerminatingGatewayRole(t *testing.T, consulClient *api.Client, rules string) { t.Helper() - // Create a write policy for the static-server. + logger.Log(t, "creating a write policy for the static-server") _, _, err := consulClient.ACL().PolicyCreate(&api.ACLPolicy{ Name: "static-server-write-policy", Rules: rules, }, nil) require.NoError(t, err) - // Get the terminating gateway token. 
- tokens, _, err := consulClient.ACL().TokenList(nil) + logger.Log(t, "getting the terminating gateway role") + roles, _, err := consulClient.ACL().RoleList(nil) require.NoError(t, err) - var termGwTokenID string - for _, token := range tokens { - if strings.Contains(token.Description, "terminating-gateway-terminating-gateway-token") { - termGwTokenID = token.AccessorID + terminatingGatewayRoleID := "" + for _, role := range roles { + if strings.Contains(role.Name, "terminating-gateway") { + terminatingGatewayRoleID = role.ID break } } - termGwToken, _, err := consulClient.ACL().TokenRead(termGwTokenID, nil) - require.NoError(t, err) - // Add policy to the token and update it - termGwToken.Policies = append(termGwToken.Policies, &api.ACLTokenPolicyLink{Name: "static-server-write-policy"}) - _, _, err = consulClient.ACL().TokenUpdate(termGwToken, nil) + logger.Log(t, "update role with policy") + termGwRole, _, err := consulClient.ACL().RoleRead(terminatingGatewayRoleID, nil) + require.NoError(t, err) + termGwRole.Policies = append(termGwRole.Policies, &api.ACLTokenPolicyLink{Name: "static-server-write-policy"}) + _, _, err = consulClient.ACL().RoleUpdate(termGwRole, nil) require.NoError(t, err) } diff --git a/acceptance/tests/vault/vault_partitions_test.go b/acceptance/tests/vault/vault_partitions_test.go new file mode 100644 index 0000000000..e650e64864 --- /dev/null +++ b/acceptance/tests/vault/vault_partitions_test.go @@ -0,0 +1,250 @@ +package vault + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/consul-k8s/acceptance/framework/consul" + "github.com/hashicorp/consul-k8s/acceptance/framework/environment" + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" + "github.com/hashicorp/consul-k8s/acceptance/framework/logger" + "github.com/hashicorp/consul-k8s/acceptance/framework/vault" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestVault_Partitions(t *testing.T) { + env := suite.Environment() + cfg := suite.Config() + serverClusterCtx := env.DefaultContext(t) + clientClusterCtx := env.Context(t, environment.SecondaryContextName) + ns := serverClusterCtx.KubectlOptions(t).Namespace + + const secondaryPartition = "secondary" + + if !cfg.EnableEnterprise { + t.Skipf("skipping this test because -enable-enterprise is not set") + } + if !cfg.EnableMultiCluster { + t.Skipf("skipping this test because -enable-multi-cluster is not set") + } + vaultReleaseName := helpers.RandomName() + consulReleaseName := helpers.RandomName() + + // In the primary cluster, we will expose Vault server as a Load balancer + // or a NodePort service so that the secondary can connect to it. + serverClusterVaultHelmValues := map[string]string{ + "server.service.type": "LoadBalancer", + } + if cfg.UseKind { + serverClusterVaultHelmValues["server.service.type"] = "NodePort" + serverClusterVaultHelmValues["server.service.nodePort"] = "31000" + } + serverClusterVault := vault.NewVaultCluster(t, serverClusterCtx, cfg, vaultReleaseName, serverClusterVaultHelmValues) + serverClusterVault.Create(t, serverClusterCtx) + + externalVaultAddress := vaultAddress(t, cfg, serverClusterCtx, vaultReleaseName) + + // In the secondary cluster, we will only deploy the agent injector and provide + // it with the primary's Vault address. 
We also want to configure the injector with + // a different k8s auth method path since the secondary cluster will need its own auth method. + clientClusterVaultHelmValues := map[string]string{ + "server.enabled": "false", + "injector.externalVaultAddr": externalVaultAddress, + "injector.authPath": "auth/kubernetes-" + secondaryPartition, + } + + secondaryVaultCluster := vault.NewVaultCluster(t, clientClusterCtx, cfg, vaultReleaseName, clientClusterVaultHelmValues) + secondaryVaultCluster.Create(t, clientClusterCtx) + + vaultClient := serverClusterVault.VaultClient(t) + + // Configure Vault Kubernetes auth method for the secondary cluster. + { + // Create auth method service account and ClusterRoleBinding. The Vault server + // in the primary cluster will use this service account token to talk to the secondary + // Kubernetes cluster. + // This ClusterRoleBinding is adapted from the Vault server's role: + // https://github.com/hashicorp/vault-helm/blob/b0528fce49c529f2c37953ea3a14f30ed651e0d6/templates/server-clusterrolebinding.yaml + + // Use a single name for all RBAC objects. + authMethodRBACName := fmt.Sprintf("%s-vault-auth-method", vaultReleaseName) + _, err := clientClusterCtx.KubernetesClient(t).RbacV1().ClusterRoleBindings().Create(context.Background(), &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: authMethodRBACName, + }, + Subjects: []rbacv1.Subject{{Kind: rbacv1.ServiceAccountKind, Name: authMethodRBACName, Namespace: ns}}, + RoleRef: rbacv1.RoleRef{APIGroup: "rbac.authorization.k8s.io", Name: "system:auth-delegator", Kind: "ClusterRole"}, + }, metav1.CreateOptions{}) + require.NoError(t, err) + + // Create service account for the auth method in the secondary cluster. + _, err = clientClusterCtx.KubernetesClient(t).CoreV1().ServiceAccounts(ns).Create(context.Background(), &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: authMethodRBACName, + }, + }, metav1.CreateOptions{}) + require.NoError(t, err) + t.Cleanup(func() { + clientClusterCtx.KubernetesClient(t).RbacV1().ClusterRoleBindings().Delete(context.Background(), authMethodRBACName, metav1.DeleteOptions{}) + clientClusterCtx.KubernetesClient(t).CoreV1().ServiceAccounts(ns).Delete(context.Background(), authMethodRBACName, metav1.DeleteOptions{}) + }) + + // Figure out the host for the Kubernetes API. This needs to be reachable from the Vault server + // in the primary cluster. + k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, clientClusterCtx) + + // Now, configure the auth method in Vault. 
+ secondaryVaultCluster.ConfigureAuthMethod(t, vaultClient, "kubernetes-"+secondaryPartition, k8sAuthMethodHost, authMethodRBACName, ns) + } + + vault.ConfigureGossipVaultSecret(t, vaultClient) + vault.CreateConnectCAPolicy(t, vaultClient, "dc1") + vault.ConfigureEnterpriseLicenseVaultSecret(t, vaultClient, cfg) + vault.ConfigureACLTokenVaultSecret(t, vaultClient, "bootstrap") + vault.ConfigureACLTokenVaultSecret(t, vaultClient, "partition") + + serverPolicies := "gossip,license,connect-ca-dc1,server-cert-dc1,bootstrap-token" + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes", "server", serverPolicies) + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes", "client", "gossip") + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes", "server-acl-init", "bootstrap-token,partition-token") + vault.ConfigureConsulCAKubernetesAuthRole(t, vaultClient, ns, "kubernetes") + + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes-"+secondaryPartition, "client", "gossip") + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes-"+secondaryPartition, "server-acl-init", "partition-token") + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes-"+secondaryPartition, "partition-init", "partition-token") + vault.ConfigureConsulCAKubernetesAuthRole(t, vaultClient, ns, "kubernetes-"+secondaryPartition) + vault.ConfigurePKICA(t, vaultClient) + certPath := vault.ConfigurePKICertificates(t, vaultClient, consulReleaseName, ns, "dc1") + + vaultCASecretName := vault.CASecretName(vaultReleaseName) + + commonHelmValues := map[string]string{ + "global.adminPartitions.enabled": "true", + + "global.enableConsulNamespaces": "true", + + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", + + "global.secretsBackend.vault.enabled": "true", + "global.secretsBackend.vault.consulClientRole": "client", + "global.secretsBackend.vault.consulCARole": "consul-ca", + "global.secretsBackend.vault.manageSystemACLsRole": "server-acl-init", + + "global.secretsBackend.vault.ca.secretName": vaultCASecretName, + "global.secretsBackend.vault.ca.secretKey": "tls.crt", + + "global.acls.manageSystemACLs": "true", + + "global.tls.enabled": "true", + "global.tls.enableAutoEncrypt": "true", + "global.tls.caCert.secretName": "pki/cert/ca", + + "global.gossipEncryption.secretName": "consul/data/secret/gossip", + "global.gossipEncryption.secretKey": "gossip", + + "global.enterpriseLicense.secretName": "consul/data/secret/license", + "global.enterpriseLicense.secretKey": "license", + } + + serverHelmValues := map[string]string{ + "global.secretsBackend.vault.consulServerRole": "server", + "global.secretsBackend.vault.connectCA.address": serverClusterVault.Address(), + "global.secretsBackend.vault.connectCA.rootPKIPath": "connect_root", + "global.secretsBackend.vault.connectCA.intermediatePKIPath": "dc1/connect_inter", + + "global.acls.bootstrapToken.secretName": "consul/data/secret/bootstrap", + "global.acls.bootstrapToken.secretKey": "token", + "global.acls.partitionToken.secretName": "consul/data/secret/partition", + "global.acls.partitionToken.secretKey": "token", + + "server.exposeGossipAndRPCPorts": "true", + "server.serverCert.secretName": certPath, + + "server.extraVolumes[0].type": "secret", + "server.extraVolumes[0].name": vaultCASecretName, + "server.extraVolumes[0].load": "false", + } + + // On Kind, 
there are no load balancers but since all clusters + // share the same node network (docker bridge), we can use + // a NodePort service so that we can access node(s) in a different Kind cluster. + if cfg.UseKind { + serverHelmValues["global.adminPartitions.service.type"] = "NodePort" + serverHelmValues["global.adminPartitions.service.nodePort.https"] = "30000" + serverHelmValues["meshGateway.service.type"] = "NodePort" + serverHelmValues["meshGateway.service.nodePort"] = "30100" + } + + helpers.MergeMaps(serverHelmValues, commonHelmValues) + + logger.Log(t, "Installing Consul") + consulCluster := consul.NewHelmCluster(t, serverHelmValues, serverClusterCtx, cfg, consulReleaseName) + consulCluster.Create(t) + + partitionServiceName := fmt.Sprintf("%s-consul-partition", consulReleaseName) + partitionSvcAddress := k8s.ServiceHost(t, cfg, serverClusterCtx, partitionServiceName) + + k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, clientClusterCtx) + + // Move Vault CA secret from primary to secondary so that we can mount it to pods in the + // secondary cluster. + logger.Logf(t, "retrieving Vault CA secret %s from the primary cluster and applying to the secondary", vaultCASecretName) + vaultCASecret, err := serverClusterCtx.KubernetesClient(t).CoreV1().Secrets(ns).Get(context.Background(), vaultCASecretName, metav1.GetOptions{}) + vaultCASecret.ResourceVersion = "" + require.NoError(t, err) + _, err = clientClusterCtx.KubernetesClient(t).CoreV1().Secrets(ns).Create(context.Background(), vaultCASecret, metav1.CreateOptions{}) + require.NoError(t, err) + t.Cleanup(func() { + clientClusterCtx.KubernetesClient(t).CoreV1().Secrets(ns).Delete(context.Background(), vaultCASecretName, metav1.DeleteOptions{}) + }) + + // Create client cluster. + clientHelmValues := map[string]string{ + "global.enabled": "false", + + "global.adminPartitions.name": secondaryPartition, + + "global.acls.bootstrapToken.secretName": "consul/data/secret/partition", + "global.acls.bootstrapToken.secretKey": "token", + + "global.secretsBackend.vault.agentAnnotations": fmt.Sprintf("vault.hashicorp.com/tls-server-name: %s-vault", vaultReleaseName), + "global.secretsBackend.vault.adminPartitionsRole": "partition-init", + + "externalServers.enabled": "true", + "externalServers.hosts[0]": partitionSvcAddress, + "externalServers.tlsServerName": "server.dc1.consul", + "externalServers.k8sAuthMethodHost": k8sAuthMethodHost, + + "client.enabled": "true", + "client.exposeGossipPorts": "true", + "client.join[0]": partitionSvcAddress, + } + + if cfg.UseKind { + clientHelmValues["externalServers.httpsPort"] = "30000" + clientHelmValues["meshGateway.service.type"] = "NodePort" + clientHelmValues["meshGateway.service.nodePort"] = "30100" + } + + helpers.MergeMaps(clientHelmValues, commonHelmValues) + + // Install the consul cluster without servers in the client cluster kubernetes context. + clientConsulCluster := consul.NewHelmCluster(t, clientHelmValues, clientClusterCtx, cfg, consulReleaseName) + clientConsulCluster.Create(t) + + // Ensure consul clients are created. 
+ agentPodList, err := clientClusterCtx.KubernetesClient(t).CoreV1().Pods(clientClusterCtx.KubectlOptions(t).Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app=consul,component=client"}) + require.NoError(t, err) + require.NotEmpty(t, agentPodList.Items) + + output, err := k8s.RunKubectlAndGetOutputE(t, clientClusterCtx.KubectlOptions(t), "logs", agentPodList.Items[0].Name, "consul", "-n", clientClusterCtx.KubectlOptions(t).Namespace) + require.NoError(t, err) + require.Contains(t, output, "Partition: 'secondary'") +} diff --git a/acceptance/tests/vault/vault_test.go b/acceptance/tests/vault/vault_test.go index dcf151f231..4e3b6d5305 100644 --- a/acceptance/tests/vault/vault_test.go +++ b/acceptance/tests/vault/vault_test.go @@ -1,7 +1,6 @@ package vault import ( - "context" "fmt" "testing" @@ -12,7 +11,6 @@ import ( "github.com/hashicorp/consul-k8s/acceptance/framework/logger" "github.com/hashicorp/consul-k8s/acceptance/framework/vault" "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const staticClientName = "static-client" @@ -34,17 +32,26 @@ func TestVault(t *testing.T) { // Now fetch the Vault client so we can create the policies and secrets. vaultClient := vaultCluster.VaultClient(t) - gossipKey := configureGossipVaultSecret(t, vaultClient) + gossipKey := vault.ConfigureGossipVaultSecret(t, vaultClient) - createConnectCAPolicy(t, vaultClient, "dc1") + vault.CreateConnectCAPolicy(t, vaultClient, "dc1") if cfg.EnableEnterprise { - configureEnterpriseLicenseVaultSecret(t, vaultClient, cfg) + vault.ConfigureEnterpriseLicenseVaultSecret(t, vaultClient, cfg) } - configureKubernetesAuthRoles(t, vaultClient, consulReleaseName, ns, "kubernetes", "dc1", cfg) + bootstrapToken := vault.ConfigureACLTokenVaultSecret(t, vaultClient, "bootstrap") - configurePKICA(t, vaultClient) - certPath := configurePKICertificates(t, vaultClient, consulReleaseName, ns, "dc1") + serverPolicies := "gossip,connect-ca-dc1,server-cert-dc1,bootstrap-token" + if cfg.EnableEnterprise { + serverPolicies += ",license" + } + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes", "server", serverPolicies) + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes", "client", "gossip") + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes", "server-acl-init", "bootstrap-token") + vault.ConfigureConsulCAKubernetesAuthRole(t, vaultClient, ns, "kubernetes") + + vault.ConfigurePKICA(t, vaultClient) + certPath := vault.ConfigurePKICertificates(t, vaultClient, consulReleaseName, ns, "dc1") vaultCASecret := vault.CASecretName(vaultReleaseName) @@ -57,10 +64,11 @@ func TestVault(t *testing.T) { "connectInject.replicas": "1", "controller.enabled": "true", - "global.secretsBackend.vault.enabled": "true", - "global.secretsBackend.vault.consulServerRole": "consul-server", - "global.secretsBackend.vault.consulClientRole": "consul-client", - "global.secretsBackend.vault.consulCARole": "consul-ca", + "global.secretsBackend.vault.enabled": "true", + "global.secretsBackend.vault.consulServerRole": "server", + "global.secretsBackend.vault.consulClientRole": "client", + "global.secretsBackend.vault.consulCARole": "consul-ca", + "global.secretsBackend.vault.manageSystemACLsRole": "server-acl-init", "global.secretsBackend.vault.ca.secretName": vaultCASecret, "global.secretsBackend.vault.ca.secretKey": "tls.crt", @@ -69,10 +77,12 @@ func TestVault(t *testing.T) { 
"global.secretsBackend.vault.connectCA.rootPKIPath": "connect_root", "global.secretsBackend.vault.connectCA.intermediatePKIPath": "dc1/connect_inter", - "global.acls.manageSystemACLs": "true", - "global.tls.enabled": "true", - "global.gossipEncryption.secretName": "consul/data/secret/gossip", - "global.gossipEncryption.secretKey": "gossip", + "global.acls.manageSystemACLs": "true", + "global.acls.bootstrapToken.secretName": "consul/data/secret/bootstrap", + "global.acls.bootstrapToken.secretKey": "token", + "global.tls.enabled": "true", + "global.gossipEncryption.secretName": "consul/data/secret/gossip", + "global.gossipEncryption.secretKey": "gossip", "ingressGateways.enabled": "true", "ingressGateways.defaults.replicas": "1", @@ -93,8 +103,8 @@ func TestVault(t *testing.T) { } if cfg.EnableEnterprise { - consulHelmValues["global.enterpriseLicense.secretName"] = "consul/data/secret/enterpriselicense" - consulHelmValues["global.enterpriseLicense.secretKey"] = "enterpriselicense" + consulHelmValues["global.enterpriseLicense.secretName"] = "consul/data/secret/license" + consulHelmValues["global.enterpriseLicense.secretKey"] = "license" } logger.Log(t, "Installing Consul") @@ -103,6 +113,7 @@ func TestVault(t *testing.T) { // Validate that the gossip encryption key is set correctly. logger.Log(t, "Validating the gossip key has been set correctly.") + consulCluster.ACLToken = bootstrapToken consulClient := consulCluster.SetupConsulClient(t, true) keys, err := consulClient.Operator().KeyringList(nil) require.NoError(t, err) @@ -116,12 +127,8 @@ func TestVault(t *testing.T) { require.Equal(t, caConfig.Provider, "vault") // Validate that consul sever is running correctly and the consul members command works - tokenSecret, err := ctx.KubernetesClient(t).CoreV1().Secrets(ns).Get(context.Background(), fmt.Sprintf("%s-consul-bootstrap-acl-token", consulReleaseName), metav1.GetOptions{}) - require.NoError(t, err) - token := string(tokenSecret.Data["token"]) - logger.Log(t, "Confirming that we can run Consul commands when exec'ing into server container") - membersOutput, err := k8s.RunKubectlAndGetOutputWithLoggerE(t, ctx.KubectlOptions(t), terratestLogger.Discard, "exec", fmt.Sprintf("%s-consul-server-0", consulReleaseName), "-c", "consul", "--", "sh", "-c", fmt.Sprintf("CONSUL_HTTP_TOKEN=%s consul members", token)) + membersOutput, err := k8s.RunKubectlAndGetOutputWithLoggerE(t, ctx.KubectlOptions(t), terratestLogger.Discard, "exec", fmt.Sprintf("%s-consul-server-0", consulReleaseName), "-c", "consul", "--", "sh", "-c", fmt.Sprintf("CONSUL_HTTP_TOKEN=%s consul members", bootstrapToken)) logger.Logf(t, "Members: \n%s", membersOutput) require.NoError(t, err) require.Contains(t, membersOutput, fmt.Sprintf("%s-consul-server-0", consulReleaseName)) diff --git a/acceptance/tests/vault/vault_wan_fed_test.go b/acceptance/tests/vault/vault_wan_fed_test.go index e8ac60cf73..5f23a91f25 100644 --- a/acceptance/tests/vault/vault_wan_fed_test.go +++ b/acceptance/tests/vault/vault_wan_fed_test.go @@ -66,14 +66,12 @@ func TestVault_WANFederationViaGateways(t *testing.T) { vaultClient := primaryVaultCluster.VaultClient(t) - configureGossipVaultSecret(t, vaultClient) + vault.ConfigureGossipVaultSecret(t, vaultClient) if cfg.EnableEnterprise { - configureEnterpriseLicenseVaultSecret(t, vaultClient, cfg) + vault.ConfigureEnterpriseLicenseVaultSecret(t, vaultClient, cfg) } - configureKubernetesAuthRoles(t, vaultClient, consulReleaseName, ns, "kubernetes", "dc1", cfg) - // Configure Vault Kubernetes auth method for the 
secondary datacenter. { // Create auth method service account and ClusterRoleBinding. The Vault server @@ -113,27 +111,42 @@ func TestVault_WANFederationViaGateways(t *testing.T) { secondaryVaultCluster.ConfigureAuthMethod(t, vaultClient, "kubernetes-dc2", k8sAuthMethodHost, authMethodRBACName, ns) } - configureKubernetesAuthRoles(t, vaultClient, consulReleaseName, ns, "kubernetes-dc2", "dc2", cfg) + commonServerPolicies := "gossip" + if cfg.EnableEnterprise { + commonServerPolicies += ",license" + } + primaryServerPolicies := commonServerPolicies + ",connect-ca-dc1,server-cert-dc1,bootstrap-token" + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes", "server", primaryServerPolicies) + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes", "client", "gossip") + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes", "server-acl-init", "bootstrap-token,replication-token") + vault.ConfigureConsulCAKubernetesAuthRole(t, vaultClient, ns, "kubernetes") + + secondaryServerPolicies := commonServerPolicies + ",connect-ca-dc2,server-cert-dc2,replication-token" + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes-dc2", "server", secondaryServerPolicies) + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes-dc2", "client", "gossip") + vault.ConfigureKubernetesAuthRole(t, vaultClient, consulReleaseName, ns, "kubernetes-dc2", "server-acl-init", "replication-token") + vault.ConfigureConsulCAKubernetesAuthRole(t, vaultClient, ns, "kubernetes-dc2") // Generate a CA and create PKI roles for the primary and secondary Consul servers. - configurePKICA(t, vaultClient) - primaryCertPath := configurePKICertificates(t, vaultClient, consulReleaseName, ns, "dc1") - secondaryCertPath := configurePKICertificates(t, vaultClient, consulReleaseName, ns, "dc2") + vault.ConfigurePKICA(t, vaultClient) + primaryCertPath := vault.ConfigurePKICertificates(t, vaultClient, consulReleaseName, ns, "dc1") + secondaryCertPath := vault.ConfigurePKICertificates(t, vaultClient, consulReleaseName, ns, "dc2") - replicationToken := configureReplicationTokenVaultSecret(t, vaultClient, consulReleaseName, ns, "kubernetes", "kubernetes-dc2") + bootstrapToken := vault.ConfigureACLTokenVaultSecret(t, vaultClient, "bootstrap") + replicationToken := vault.ConfigureACLTokenVaultSecret(t, vaultClient, "replication") // Create the Vault Policy for the Connect CA in both datacenters. - createConnectCAPolicy(t, vaultClient, "dc1") - createConnectCAPolicy(t, vaultClient, "dc2") + vault.CreateConnectCAPolicy(t, vaultClient, "dc1") + vault.CreateConnectCAPolicy(t, vaultClient, "dc2") // Move Vault CA secret from primary to secondary so that we can mount it to pods in the // secondary cluster. 
vaultCASecretName := vault.CASecretName(vaultReleaseName) logger.Logf(t, "retrieving Vault CA secret %s from the primary cluster and applying to the secondary", vaultCASecretName) - vaultCASecret, err := primaryCtx.KubernetesClient(t).CoreV1().Secrets(primaryCtx.KubectlOptions(t).Namespace).Get(context.Background(), vaultCASecretName, metav1.GetOptions{}) + vaultCASecret, err := primaryCtx.KubernetesClient(t).CoreV1().Secrets(ns).Get(context.Background(), vaultCASecretName, metav1.GetOptions{}) vaultCASecret.ResourceVersion = "" require.NoError(t, err) - _, err = secondaryCtx.KubernetesClient(t).CoreV1().Secrets(secondaryCtx.KubectlOptions(t).Namespace).Create(context.Background(), vaultCASecret, metav1.CreateOptions{}) + _, err = secondaryCtx.KubernetesClient(t).CoreV1().Secrets(ns).Create(context.Background(), vaultCASecret, metav1.CreateOptions{}) require.NoError(t, err) t.Cleanup(func() { secondaryCtx.KubernetesClient(t).CoreV1().Secrets(ns).Delete(context.Background(), vaultCASecretName, metav1.DeleteOptions{}) @@ -156,9 +169,11 @@ func TestVault_WANFederationViaGateways(t *testing.T) { // ACL config. "global.acls.manageSystemACLs": "true", + "global.acls.bootstrapToken.secretName": "consul/data/secret/bootstrap", + "global.acls.bootstrapToken.secretKey": "token", "global.acls.createReplicationToken": "true", "global.acls.replicationToken.secretName": "consul/data/secret/replication", - "global.acls.replicationToken.secretKey": "replication", + "global.acls.replicationToken.secretKey": "token", // Mesh config. "connectInject.enabled": "true", @@ -173,8 +188,8 @@ func TestVault_WANFederationViaGateways(t *testing.T) { // Vault config. "global.secretsBackend.vault.enabled": "true", - "global.secretsBackend.vault.consulServerRole": "consul-server", - "global.secretsBackend.vault.consulClientRole": "consul-client", + "global.secretsBackend.vault.consulServerRole": "server", + "global.secretsBackend.vault.consulClientRole": "client", "global.secretsBackend.vault.consulCARole": "consul-ca", "global.secretsBackend.vault.manageSystemACLsRole": "server-acl-init", "global.secretsBackend.vault.ca.secretName": vaultCASecretName, @@ -185,8 +200,8 @@ func TestVault_WANFederationViaGateways(t *testing.T) { } if cfg.EnableEnterprise { - primaryConsulHelmValues["global.enterpriseLicense.secretName"] = "consul/data/secret/enterpriselicense" - primaryConsulHelmValues["global.enterpriseLicense.secretKey"] = "enterpriselicense" + primaryConsulHelmValues["global.enterpriseLicense.secretName"] = "consul/data/secret/license" + primaryConsulHelmValues["global.enterpriseLicense.secretKey"] = "license" } if cfg.UseKind { @@ -197,12 +212,26 @@ func TestVault_WANFederationViaGateways(t *testing.T) { primaryConsulCluster := consul.NewHelmCluster(t, primaryConsulHelmValues, primaryCtx, cfg, consulReleaseName) primaryConsulCluster.Create(t) + var k8sAuthMethodHost string + // When running on kind, the kube API address in kubeconfig will have a localhost address + // which will not work from inside the container. That's why we need to use the endpoints address instead + // which will point the node IP. + if cfg.UseKind { + // The Kubernetes AuthMethod host is read from the endpoints for the Kubernetes service. 
+ kubernetesEndpoint, err := secondaryCtx.KubernetesClient(t).CoreV1().Endpoints("default").Get(context.Background(), "kubernetes", metav1.GetOptions{}) + require.NoError(t, err) + k8sAuthMethodHost = fmt.Sprintf("%s:%d", kubernetesEndpoint.Subsets[0].Addresses[0].IP, kubernetesEndpoint.Subsets[0].Ports[0].Port) + } else { + k8sAuthMethodHost = k8s.KubernetesAPIServerHostFromOptions(t, secondaryCtx.KubectlOptions(t)) + } + // Get the address of the mesh gateway. primaryMeshGWAddress := meshGatewayAddress(t, cfg, primaryCtx, consulReleaseName) secondaryConsulHelmValues := map[string]string{ "global.datacenter": "dc2", "global.federation.enabled": "true", + "global.federation.k8sAuthMethodHost": k8sAuthMethodHost, "global.federation.primaryDatacenter": "dc1", "global.federation.primaryGateways[0]": primaryMeshGWAddress, @@ -219,7 +248,7 @@ func TestVault_WANFederationViaGateways(t *testing.T) { // ACL config. "global.acls.manageSystemACLs": "true", "global.acls.replicationToken.secretName": "consul/data/secret/replication", - "global.acls.replicationToken.secretKey": "replication", + "global.acls.replicationToken.secretKey": "token", // Mesh config. "connectInject.enabled": "true", @@ -233,8 +262,8 @@ func TestVault_WANFederationViaGateways(t *testing.T) { // Vault config. "global.secretsBackend.vault.enabled": "true", - "global.secretsBackend.vault.consulServerRole": "consul-server", - "global.secretsBackend.vault.consulClientRole": "consul-client", + "global.secretsBackend.vault.consulServerRole": "server", + "global.secretsBackend.vault.consulClientRole": "client", "global.secretsBackend.vault.consulCARole": "consul-ca", "global.secretsBackend.vault.manageSystemACLsRole": "server-acl-init", "global.secretsBackend.vault.ca.secretName": vaultCASecretName, @@ -248,8 +277,8 @@ func TestVault_WANFederationViaGateways(t *testing.T) { } if cfg.EnableEnterprise { - secondaryConsulHelmValues["global.enterpriseLicense.secretName"] = "consul/data/secret/enterpriselicense" - secondaryConsulHelmValues["global.enterpriseLicense.secretKey"] = "enterpriselicense" + secondaryConsulHelmValues["global.enterpriseLicense.secretName"] = "consul/data/secret/license" + secondaryConsulHelmValues["global.enterpriseLicense.secretKey"] = "license" } if cfg.UseKind { @@ -263,6 +292,7 @@ func TestVault_WANFederationViaGateways(t *testing.T) { // Verify federation between servers. logger.Log(t, "verifying federation was successful") + primaryConsulCluster.ACLToken = bootstrapToken primaryClient := primaryConsulCluster.SetupConsulClient(t, true) secondaryConsulCluster.ACLToken = replicationToken secondaryClient := secondaryConsulCluster.SetupConsulClient(t, true) diff --git a/charts/consul/templates/_helpers.tpl b/charts/consul/templates/_helpers.tpl index a4d170dfae..11d4998f03 100644 --- a/charts/consul/templates/_helpers.tpl +++ b/charts/consul/templates/_helpers.tpl @@ -48,7 +48,7 @@ as well as the global.name setting. {{- define "consul.serverTLSAltNames" -}} {{- $name := include "consul.fullname" . -}} {{- $ns := .Release.Namespace -}} -{{ printf "localhost,%s-server,*.%s-server,*.%s-server.%s,*.%s-server.%s.svc,*.server.%s.%s" $name $name $name $ns $name $ns (.Values.global.datacenter ) (.Values.global.domain) }}{{ include "consul.serverAdditionalDNSSANs" . 
}} +{{ printf "localhost,%s-server,*.%s-server,*.%s-server.%s,%s-server.%s,*.%s-server.%s.svc,%s-server.%s.svc,*.server.%s.%s" $name $name $name $ns $name $ns $name $ns $name $ns (.Values.global.datacenter ) (.Values.global.domain) }}{{ include "consul.serverAdditionalDNSSANs" . }} {{- end -}} {{- define "consul.serverAdditionalDNSSANs" -}} @@ -73,6 +73,13 @@ as well as the global.name setting. {{ "{{" }}- end -{{ "}}" }} {{- end -}} +{{- define "consul.vaultBootstrapTokenConfigTemplate" -}} +| + {{ "{{" }}- with secret "{{ .Values.global.acls.bootstrapToken.secretName }}" -{{ "}}" }} + acl { tokens { initial_management = "{{ "{{" }}- {{ printf ".Data.data.%s" .Values.global.acls.bootstrapToken.secretKey }} -{{ "}}" }}" }} + {{ "{{" }}- end -{{ "}}" }} +{{- end -}} + {{/* Sets up the extra-from-values config file passed to consul and then uses sed to do any necessary substitution for HOST_IP/POD_IP/HOSTNAME. Useful for dogstats telemetry. The output file @@ -166,12 +173,11 @@ This template is for an init container. {{- if .Values.externalServers.tlsServerName }} -tls-server-name={{ .Values.externalServers.tlsServerName }} \ {{- end }} - {{- if not .Values.externalServers.useSystemRoots }} - -ca-file=/consul/tls/ca/tls.crt - {{- end }} {{- else }} -server-addr={{ template "consul.fullname" . }}-server \ -server-port=8501 \ + {{- end }} + {{- if or (not .Values.externalServers.enabled) (and .Values.externalServers.enabled (not .Values.externalServers.useSystemRoots)) }} {{- if .Values.global.secretsBackend.vault.enabled }} -ca-file=/vault/secrets/serverca.crt {{- else }} diff --git a/charts/consul/templates/api-gateway-controller-deployment.yaml b/charts/consul/templates/api-gateway-controller-deployment.yaml index 492d9f3302..eb64c9b58c 100644 --- a/charts/consul/templates/api-gateway-controller-deployment.yaml +++ b/charts/consul/templates/api-gateway-controller-deployment.yaml @@ -60,11 +60,8 @@ spec: fieldRef: fieldPath: status.hostIP {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_HTTP_TOKEN - valueFrom: - secretKeyRef: - name: "{{ template "consul.fullname" . }}-api-gateway-controller-acl-token" - key: "token" + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/login/acl-token" {{- end }} - name: CONSUL_HTTP_ADDR {{- if .Values.global.tls.enabled }} @@ -73,35 +70,57 @@ spec: value: http://$(HOST_IP):8500 {{- end }} command: - - "/bin/sh" - - "-ec" - - | - consul-api-gateway server \ - -sds-server-host {{ template "consul.fullname" . }}-api-gateway-controller.{{ .Release.Namespace }}.svc \ - -k8s-namespace {{ .Release.Namespace }} \ - {{- if .Values.global.enableConsulNamespaces }} - {{- if .Values.apiGateway.consulNamespaces.consulDestinationNamespace }} - -consul-destination-namespace={{ .Values.apiGateway.consulNamespaces.consulDestinationNamespace }} \ - {{- end }} - {{- if .Values.apiGateway.consulNamespaces.mirroringK8S }} - -mirroring-k8s=true \ - {{- if .Values.apiGateway.consulNamespaces.mirroringK8SPrefix }} - -mirroring-k8s-prefix={{ .Values.apiGateway.consulNamespaces.mirroringK8SPrefix }} \ - {{- end }} - {{- end }} - {{- end }} - -log-level {{ default .Values.global.logLevel .Values.apiGateway.logLevel }} \ + - "/bin/sh" + - "-ec" + - | + consul-api-gateway server \ + -sds-server-host {{ template "consul.fullname" . 
}}-api-gateway-controller.{{ .Release.Namespace }}.svc \ + -k8s-namespace {{ .Release.Namespace }} \ + {{- if .Values.global.enableConsulNamespaces }} + {{- if .Values.apiGateway.consulNamespaces.consulDestinationNamespace }} + -consul-destination-namespace={{ .Values.apiGateway.consulNamespaces.consulDestinationNamespace }} \ + {{- end }} + {{- if .Values.apiGateway.consulNamespaces.mirroringK8S }} + -mirroring-k8s=true \ + {{- if .Values.apiGateway.consulNamespaces.mirroringK8SPrefix }} + -mirroring-k8s-prefix={{ .Values.apiGateway.consulNamespaces.mirroringK8SPrefix }} \ + {{- end }} + {{- end }} + {{- end }} + -log-level {{ default .Values.global.logLevel .Values.apiGateway.logLevel }} \ + -log-json={{ .Values.global.logJSON }} volumeMounts: - {{- if .Values.global.tls.enabled }} - {{- if .Values.global.tls.enableAutoEncrypt }} - - name: consul-auto-encrypt-ca-cert - {{- else }} - - name: consul-ca-cert - {{- end }} - mountPath: /consul/tls/ca - readOnly: true - {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: consul-bin + mountPath: /consul-bin + {{- end }} + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + - mountPath: /consul/login + name: consul-data + readOnly: true + {{- if .Values.apiGateway.resources }} + resources: + {{- toYaml .Values.apiGateway.resources | nindent 12 }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + lifecycle: + preStop: + exec: + command: [ "/bin/sh", "-ec", "/consul-bin/consul logout" ] + {{- end }} volumes: + {{- if .Values.global.acls.manageSystemACLs }} + - name: consul-bin + emptyDir: { } + {{- end }} {{- if .Values.global.tls.enabled }} {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - name: consul-ca-cert @@ -121,18 +140,73 @@ spec: medium: "Memory" {{- end }} {{- end }} - {{- if or (and .Values.global.acls.manageSystemACLs) (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} + - name: consul-data + emptyDir: + medium: "Memory" + {{- if or .Values.global.acls.manageSystemACLs (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} initContainers: {{- if .Values.global.acls.manageSystemACLs }} + - name: copy-consul-bin + image: {{ .Values.global.image | quote }} + command: + - cp + - /bin/consul + - /consul-bin/consul + volumeMounts: + - name: consul-bin + mountPath: /consul-bin + {{- if .Values.apiGateway.initCopyConsulContainer }} + {{- if .Values.apiGateway.initCopyConsulContainer.resources }} + resources: {{ toYaml .Values.apiGateway.initCopyConsulContainer.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- end }} + {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} + {{- include "consul.getAutoEncryptClientCA" . 
| nindent 6 }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} - name: api-gateway-controller-acl-init + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} image: {{ .Values.global.imageK8S }} + volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} command: - - "/bin/sh" - - "-ec" - - | - consul-k8s-control-plane acl-init \ - -secret-name="{{ template "consul.fullname" . }}-api-gateway-controller-acl-token" \ - -k8s-namespace={{ .Release.Namespace }} + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane acl-init \ + -component-name=api-gateway-controller \ + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method \ + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -log-level={{ default .Values.global.logLevel .Values.apiGateway.logLevel }} \ + -log-json={{ .Values.global.logJSON }} resources: requests: memory: "25Mi" @@ -141,9 +215,6 @@ spec: memory: "25Mi" cpu: "50m" {{- end }} - {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} - {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} - {{- end }} {{- end }} {{- if .Values.apiGateway.controller.priorityClassName }} priorityClassName: {{ .Values.apiGateway.controller.priorityClassName | quote }} diff --git a/charts/consul/templates/auth-method-clusterrole.yaml b/charts/consul/templates/auth-method-clusterrole.yaml new file mode 100644 index 0000000000..6b8f2c5451 --- /dev/null +++ b/charts/consul/templates/auth-method-clusterrole.yaml @@ -0,0 +1,18 @@ +{{- if .Values.global.acls.manageSystemACLs }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-auth-method + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: auth-method +rules: +- apiGroups: [ "" ] + resources: + - serviceaccounts + verbs: + - get +{{- end }} diff --git a/charts/consul/templates/auth-method-clusterrolebinding.yaml b/charts/consul/templates/auth-method-clusterrolebinding.yaml new file mode 100644 index 0000000000..9bd6c64113 --- /dev/null +++ b/charts/consul/templates/auth-method-clusterrolebinding.yaml @@ -0,0 +1,39 @@ +{{- if .Values.global.acls.manageSystemACLs }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-authdelegator + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: auth-method +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "system:auth-delegator" +subjects: +- kind: ServiceAccount + name: {{ template "consul.fullname" . 
}}-auth-method + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-auth-method + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: auth-method +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-auth-method +subjects: +- kind: ServiceAccount + name: {{ template "consul.fullname" . }}-auth-method + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/consul/templates/auth-method-serviceaccount.yaml b/charts/consul/templates/auth-method-serviceaccount.yaml new file mode 100644 index 0000000000..098339b8c8 --- /dev/null +++ b/charts/consul/templates/auth-method-serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- if .Values.global.acls.manageSystemACLs }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-auth-method + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: auth-method +{{- with .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range . }} +- name: {{ .name }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/consul/templates/client-daemonset.yaml b/charts/consul/templates/client-daemonset.yaml index d5d4a07de0..334949f840 100644 --- a/charts/consul/templates/client-daemonset.yaml +++ b/charts/consul/templates/client-daemonset.yaml @@ -10,6 +10,7 @@ {{- if and .Values.global.federation.enabled .Values.global.adminPartitions.enabled }}{{ fail "If global.federation.enabled is true, global.adminPartitions.enabled must be false because they are mutually exclusive" }}{{ end }} {{- if (and .Values.global.enterpriseLicense.secretName (not .Values.global.enterpriseLicense.secretKey)) }}{{fail "enterpriseLicense.secretKey and secretName must both be specified." }}{{ end -}} {{- if (and (not .Values.global.enterpriseLicense.secretName) .Values.global.enterpriseLicense.secretKey) }}{{fail "enterpriseLicense.secretKey and secretName must both be specified." }}{{ end -}} +{{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} # DaemonSet to run the Consul clients on every node. apiVersion: apps/v1 kind: DaemonSet @@ -48,6 +49,7 @@ spec: annotations: {{- if .Values.global.secretsBackend.vault.enabled }} "vault.hashicorp.com/agent-inject": "true" + "vault.hashicorp.com/agent-init-first": "true" "vault.hashicorp.com/role": "{{ .Values.global.secretsBackend.vault.consulClientRole }}" {{- if and .Values.global.secretsBackend.vault.ca.secretName .Values.global.secretsBackend.vault.ca.secretKey }} "vault.hashicorp.com/agent-extra-secret": "{{ .Values.global.secretsBackend.vault.ca.secretName }}" @@ -66,7 +68,7 @@ spec: {{- if .Values.global.secretsBackend.vault.agentAnnotations }} {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . 
| nindent 8 | trim }} {{- end }} - {{- if .Values.global.enterpriseLicense.secretName }} + {{- if and .Values.global.enterpriseLicense.secretName (not .Values.global.acls.manageSystemACLs) }} {{- with .Values.global.enterpriseLicense }} "vault.hashicorp.com/agent-inject-secret-enterpriselicense.txt": "{{ .secretName }}" "vault.hashicorp.com/agent-inject-template-enterpriselicense.txt": {{ template "consul.vaultSecretTemplate" . }} @@ -124,6 +126,9 @@ spec: - name: config configMap: name: {{ template "consul.fullname" . }}-client-config + - name: consul-data + emptyDir: + medium: "Memory" {{- if .Values.global.tls.enabled }} {{- if not .Values.global.secretsBackend.vault.enabled }} - name: consul-ca-cert @@ -136,7 +141,8 @@ spec: items: - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} path: tls.crt - {{ if not .Values.global.tls.enableAutoEncrypt }} + {{- end }} + {{- if (and (not .Values.global.secretsBackend.vault.enabled) (not .Values.global.tls.enableAutoEncrypt)) }} - name: consul-ca-key secret: {{- if .Values.global.tls.caKey.secretName }} @@ -154,7 +160,6 @@ spec: medium: "Memory" {{- end }} {{- end }} - {{- end }} {{- range .Values.client.extraVolumes }} - name: userconfig-{{ .name }} {{ .type }}: @@ -177,7 +182,21 @@ spec: containers: - name: consul image: "{{ default .Values.global.image .Values.client.image }}" + {{- if .Values.global.acls.manageSystemACLs }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + consul logout + {{- end }} env: + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/login/acl-token" + {{- end }} - name: ADVERTISE_IP valueFrom: fieldRef: @@ -340,6 +359,9 @@ spec: mountPath: /consul/data - name: config mountPath: /consul/config + - mountPath: /consul/login + name: consul-data + readOnly: true {{- if .Values.global.tls.enabled }} {{- if not .Values.global.secretsBackend.vault.enabled }} - name: consul-ca-cert @@ -435,17 +457,57 @@ spec: {{- if .Values.global.acls.manageSystemACLs }} - name: client-acl-init image: {{ .Values.global.imageK8S }} + env: + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:8501 + {{- else }} + value: http://{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:8500 + {{- end }} + {{- if (and .Values.global.tls.enabled (not .Values.externalServers.useSystemRoots)) }} + - name: CONSUL_CACERT + {{- if .Values.global.secretsBackend.vault.enabled }} + value: "/vault/secrets/serverca.crt" + {{- else }} + value: "/consul/tls/ca/tls.crt" + {{- end }} + {{- end }} command: - "/bin/sh" - "-ec" - | consul-k8s-control-plane acl-init \ - -secret-name="{{ template "consul.fullname" . }}-client-acl-token" \ - -k8s-namespace={{ .Release.Namespace }} \ + -component-name=client \ + -acl-auth-method="{{ template "consul.fullname" . }}-k8s-component-auth-method" \ + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -log-level={{ default .Values.global.logLevel .Values.client.logLevel }} \ + -log-json={{ .Values.global.logJSON }} \ + {{- if .Values.externalServers.enabled }} + {{- if .Values.global.tls.enabled }} + -use-https \ + {{- end }} + {{- range .Values.externalServers.hosts }} + -server-address={{ quote . 
}} \ + {{- end }} + -server-port={{ .Values.externalServers.httpsPort }} \ + {{- if .Values.externalServers.tlsServerName }} + -tls-server-name={{ .Values.externalServers.tlsServerName }} \ + {{- end }} + {{- end }} -init-type="client" volumeMounts: - name: aclconfig mountPath: /consul/aclconfig + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if (and (not .Values.global.secretsBackend.vault.enabled) (not .Values.externalServers.useSystemRoots)) }} + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: false + {{- end }} resources: requests: memory: "25Mi" diff --git a/charts/consul/templates/client-snapshot-agent-deployment.yaml b/charts/consul/templates/client-snapshot-agent-deployment.yaml index 7b7e953c98..f0ed5b2588 100644 --- a/charts/consul/templates/client-snapshot-agent-deployment.yaml +++ b/charts/consul/templates/client-snapshot-agent-deployment.yaml @@ -1,5 +1,7 @@ {{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +{{- if or (and .Values.client.snapshotAgent.configSecret.secretName (not .Values.client.snapshotAgent.configSecret.secretKey)) (and (not .Values.client.snapshotAgent.configSecret.secretName) .Values.client.snapshotAgent.configSecret.secretKey) }}{{fail "client.snapshotAgent.configSecret.secretKey and client.snapshotAgent.configSecret.secretName must both be specified." }}{{ end -}} {{- if .Values.client.snapshotAgent.enabled }} +{{- if or (and .Values.client.snapshotAgent.configSecret.secretName (not .Values.client.snapshotAgent.configSecret.secretKey)) (and (not .Values.client.snapshotAgent.configSecret.secretName) .Values.client.snapshotAgent.configSecret.secretKey) }}{{fail "client.snapshotAgent.configSecret.secretKey and client.snapshotAgent.configSecret.secretName must both be specified." }}{{ end -}} apiVersion: apps/v1 kind: Deployment metadata: @@ -29,10 +31,14 @@ spec: annotations: "consul.hashicorp.com/connect-inject": "false" {{- if .Values.global.secretsBackend.vault.enabled }} + {{- if .Values.client.snapshotAgent.configSecret.secretName }} + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulSnapshotAgentRole }} + {{- else if and .Values.global.tls.enabled }} + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulCARole }} + {{- end }} {{- if .Values.global.tls.enabled }} "vault.hashicorp.com/agent-init-first": "true" "vault.hashicorp.com/agent-inject": "true" - "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulCARole }} "vault.hashicorp.com/agent-inject-secret-serverca.crt": {{ .Values.global.tls.caCert.secretName }} "vault.hashicorp.com/agent-inject-template-serverca.crt": {{ template "consul.serverTLSCATemplate" . }} {{- if and .Values.global.secretsBackend.vault.ca.secretName .Values.global.secretsBackend.vault.ca.secretKey }} @@ -41,14 +47,20 @@ spec: {{- end }} {{- if .Values.global.secretsBackend.vault.agentAnnotations }} {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . | nindent 8 | trim }} - {{- end }} + {{- end }} {{- end }} {{- if .Values.global.enterpriseLicense.secretName }} {{- with .Values.global.enterpriseLicense }} "vault.hashicorp.com/agent-inject-secret-enterpriselicense.txt": "{{ .secretName }}" "vault.hashicorp.com/agent-inject-template-enterpriselicense.txt": {{ template "consul.vaultSecretTemplate" . 
}} {{- end }} - {{- end }} + {{- end }} + {{- if .Values.client.snapshotAgent.configSecret.secretName }} + {{- with .Values.client.snapshotAgent.configSecret }} + "vault.hashicorp.com/agent-inject-secret-snapshot-agent-config.json": "{{ .secretName }}" + "vault.hashicorp.com/agent-inject-template-snapshot-agent-config.json": {{ template "consul.vaultSecretTemplate" . }} + {{- end }} + {{- end }} {{- end }} spec: {{- if .Values.client.tolerations }} @@ -62,139 +74,180 @@ spec: {{- end }} {{- if (or .Values.global.acls.manageSystemACLs .Values.global.tls.enabled (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload)) }} volumes: - {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) }} - - name: snapshot-config - secret: - secretName: {{ .Values.client.snapshotAgent.configSecret.secretName }} - items: - - key: {{ .Values.client.snapshotAgent.configSecret.secretKey }} - path: snapshot-config.json + - name: consul-data + emptyDir: + medium: "Memory" + {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey (not .Values.global.secretsBackend.vault.enabled)) }} + - name: snapshot-config + secret: + secretName: {{ .Values.client.snapshotAgent.configSecret.secretName }} + items: + - key: {{ .Values.client.snapshotAgent.configSecret.secretKey }} + path: snapshot-config.json + {{- end }} + {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload (not .Values.global.secretsBackend.vault.enabled) (not .Values.global.acls.manageSystemACLs)) }} + - name: consul-license + secret: + secretName: {{ .Values.global.enterpriseLicense.secretName }} + {{- end }} + {{- if .Values.global.tls.enabled }} + {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . 
}}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} + {{- end }} + {{- end }} + containers: + - name: consul-snapshot-agent + image: "{{ default .Values.global.image .Values.client.image }}" + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 {{- end }} {{- if .Values.global.acls.manageSystemACLs }} - - name: aclconfig - emptyDir: {} + - name: CONSUL_HTTP_TOKEN_FILE + value: /consul/login/acl-token {{- else }} - {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload (not .Values.global.secretsBackend.vault.enabled)) }} - - name: consul-license - secret: - secretName: {{ .Values.global.enterpriseLicense.secretName }} + {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload) }} + - name: CONSUL_LICENSE_PATH + {{- if .Values.global.secretsBackend.vault.enabled }} + value: /vault/secrets/enterpriselicense.txt + {{- else }} + value: /consul/license/{{ .Values.global.enterpriseLicense.secretKey }} + {{- end }} {{- end }} {{- end }} - {{- if .Values.global.tls.enabled }} - {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - - name: consul-ca-cert - secret: - {{- if .Values.global.tls.caCert.secretName }} - secretName: {{ .Values.global.tls.caCert.secretName }} + command: + - "/bin/sh" + - "-ec" + - | + {{- if .Values.client.snapshotAgent.caCert }} + cat <<EOF > /etc/ssl/certs/custom-ca.pem + {{- .Values.client.snapshotAgent.caCert | nindent 14 }} + EOF + {{- end }} + exec /bin/consul snapshot agent \ + {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) }} + {{- if .Values.global.secretsBackend.vault.enabled }} + -config-file=/vault/secrets/snapshot-agent-config.json \ {{- else }} - secretName: {{ template "consul.fullname" . 
}}-ca-cert + -config-dir=/consul/config \ + {{- end }} {{- end }} - items: - - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} - path: tls.crt + {{- if .Values.global.acls.manageSystemACLs }} + -config-dir=/consul/login \ + {{- end }} + {{- if (or .Values.global.acls.manageSystemACLs .Values.global.tls.enabled (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload)) }} + {{- if .Values.global.acls.manageSystemACLs }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + /bin/consul logout {{- end }} - {{- if .Values.global.tls.enableAutoEncrypt }} + volumeMounts: + {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey (not .Values.global.secretsBackend.vault.enabled)) }} + - name: snapshot-config + readOnly: true + mountPath: /consul/config + {{- end }} + - mountPath: /consul/login + name: consul-data + readOnly: true + {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload (not .Values.global.secretsBackend.vault.enabled) (not .Values.global.acls.manageSystemACLs))}} + - name: consul-license + mountPath: /consul/license + readOnly: true + {{- end }} + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt}} - name: consul-auto-encrypt-ca-cert - emptyDir: - medium: "Memory" + {{- else }} + - name: consul-ca-cert {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- end }} + {{- with .Values.client.snapshotAgent.resources }} + resources: + {{- toYaml . | nindent 12 }} {{- end }} - {{- end }} - containers: - - name: consul-snapshot-agent - image: "{{ default .Values.global.image .Values.client.image }}" - env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - {{- if .Values.global.tls.enabled }} - - name: CONSUL_HTTP_ADDR - value: https://$(HOST_IP):8501 - - name: CONSUL_CACERT - value: /consul/tls/ca/tls.crt - {{- else }} - - name: CONSUL_HTTP_ADDR - value: http://$(HOST_IP):8500 - {{- end }} - {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_HTTP_TOKEN - valueFrom: - secretKeyRef: - name: "{{ template "consul.fullname" . 
}}-client-snapshot-agent-acl-token" - key: "token" - {{- else }} - {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload) }} - - name: CONSUL_LICENSE_PATH - {{- if .Values.global.secretsBackend.vault.enabled }} - value: /vault/secrets/enterpriselicense.txt - {{- else }} - value: /consul/license/{{ .Values.global.enterpriseLicense.secretKey }} - {{- end }} - {{- end }} - {{- end}} - command: - - "/bin/sh" - - "-ec" - - | - {{- if .Values.client.snapshotAgent.caCert }} - cat < /etc/ssl/certs/custom-ca.pem - {{- .Values.client.snapshotAgent.caCert | nindent 14 }} - EOF - {{- end }} - exec /bin/consul snapshot agent \ - {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) }} - -config-dir=/consul/config \ - {{- end }} - {{- if .Values.global.acls.manageSystemACLs }} - -config-dir=/consul/aclconfig \ - {{- end }} - {{- if (or .Values.global.acls.manageSystemACLs .Values.global.tls.enabled (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload)) }} - volumeMounts: - {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) }} - - name: snapshot-config - readOnly: true - mountPath: /consul/config - {{- end }} - {{- if .Values.global.acls.manageSystemACLs }} - - name: aclconfig - mountPath: /consul/aclconfig - {{- else }} - {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload (not .Values.global.secretsBackend.vault.enabled)) }} - - name: consul-license - mountPath: /consul/license - readOnly: true - {{- end }} - {{- end }} - {{- if .Values.global.tls.enabled }} - {{- if .Values.global.tls.enableAutoEncrypt}} - - name: consul-auto-encrypt-ca-cert - {{- else }} - - name: consul-ca-cert - {{- end }} - mountPath: /consul/tls/ca - readOnly: true - {{- end }} - {{- end }} - {{- with .Values.client.snapshotAgent.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} {{- if (or .Values.global.acls.manageSystemACLs (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt)) }} initContainers: + {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} + {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} + {{- end }} {{- if .Values.global.acls.manageSystemACLs }} - - name: client-snapshot-agent-acl-init + - name: snapshot-agent-acl-init + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} image: {{ .Values.global.imageK8S }} - command: - - "/bin/sh" - - "-ec" - - | - consul-k8s-control-plane acl-init \ - -secret-name="{{ template "consul.fullname" . 
}}-client-snapshot-agent-acl-token" \ - -k8s-namespace={{ .Release.Namespace }} volumeMounts: - - name: aclconfig - mountPath: /consul/aclconfig + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane acl-init \ + -component-name=snapshot-agent \ + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method \ + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -token-sink-file=/consul/login/acl-token \ + -log-level={{ default .Values.global.logLevel }} \ + -log-json={{ .Values.global.logJSON }} resources: requests: memory: "25Mi" @@ -203,9 +256,6 @@ spec: memory: "25Mi" cpu: "50m" {{- end }} - {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} - {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} - {{- end }} {{- end }} {{- if .Values.client.nodeSelector }} nodeSelector: diff --git a/charts/consul/templates/client-snapshot-agent-role.yaml b/charts/consul/templates/client-snapshot-agent-role.yaml index 6691750487..3077bc96f0 100644 --- a/charts/consul/templates/client-snapshot-agent-role.yaml +++ b/charts/consul/templates/client-snapshot-agent-role.yaml @@ -11,27 +11,16 @@ metadata: heritage: {{ .Release.Service }} release: {{ .Release.Name }} component: client-snapshot-agent -{{- if (or .Values.global.acls.manageSystemACLs .Values.global.enablePodSecurityPolicies) }} -rules: {{- if .Values.global.enablePodSecurityPolicies }} - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - resourceNames: - - {{ template "consul.fullname" . }}-snapshot-agent - verbs: - - use -{{- end }} -{{- if .Values.global.acls.manageSystemACLs }} - - apiGroups: [""] - resources: - - secrets - resourceNames: - - {{ template "consul.fullname" . }}-client-snapshot-agent-acl-token - verbs: - - get -{{- end }} +rules: +- apiGroups: [ "policy" ] + resources: [ "podsecuritypolicies" ] + resourceNames: + - {{ template "consul.fullname" . }}-snapshot-agent + verbs: + - use {{- else }} -rules: [] +rules: [ ] {{- end }} {{- end }} {{- end }} diff --git a/charts/consul/templates/connect-inject-authmethod-clusterrolebinding.yaml b/charts/consul/templates/connect-inject-authmethod-clusterrolebinding.yaml deleted file mode 100644 index 4f9d7c8083..0000000000 --- a/charts/consul/templates/connect-inject-authmethod-clusterrolebinding.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled) }} -{{- if .Values.global.acls.manageSystemACLs }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "consul.fullname" . }}-connect-injector-authdelegator - labels: - app: {{ template "consul.name" . }} - chart: {{ template "consul.chart" . }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - component: connect-injector -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: "system:auth-delegator" -subjects: - - kind: ServiceAccount - name: {{ template "consul.fullname" . 
}}-connect-injector - namespace: {{ .Release.Namespace }} -{{- end }} -{{- end }} diff --git a/charts/consul/templates/connect-inject-clusterrole.yaml b/charts/consul/templates/connect-inject-clusterrole.yaml index 9d01420363..683a9c6bf7 100644 --- a/charts/consul/templates/connect-inject-clusterrole.yaml +++ b/charts/consul/templates/connect-inject-clusterrole.yaml @@ -12,14 +12,14 @@ metadata: component: connect-injector rules: {{- if .Values.global.acls.manageSystemACLs }} -- apiGroups: [""] +- apiGroups: [ "" ] resources: - serviceaccounts verbs: - get {{- end }} -- apiGroups: [""] - resources: ["pods", "endpoints", "services", "namespaces"] +- apiGroups: [ "" ] + resources: [ "pods", "endpoints", "services", "namespaces" ] verbs: - "get" - "list" @@ -34,25 +34,11 @@ rules: - list - update {{- if .Values.global.enablePodSecurityPolicies }} -- apiGroups: ["policy"] - resources: ["podsecuritypolicies"] +- apiGroups: [ "policy" ] + resources: [ "podsecuritypolicies" ] resourceNames: - {{ template "consul.fullname" . }}-connect-injector verbs: - use {{- end }} -{{- if .Values.global.acls.manageSystemACLs }} -- apiGroups: [""] - resources: - - secrets - resourceNames: - - {{ template "consul.fullname" . }}-connect-inject-acl-token - verbs: - - get -- apiGroups: [""] - resources: - - serviceaccounts - verbs: - - get -{{- end }} {{- end }} diff --git a/charts/consul/templates/connect-inject-clusterrolebinding.yaml b/charts/consul/templates/connect-inject-clusterrolebinding.yaml index 64bff8269f..c380adb311 100644 --- a/charts/consul/templates/connect-inject-clusterrolebinding.yaml +++ b/charts/consul/templates/connect-inject-clusterrolebinding.yaml @@ -17,4 +17,4 @@ subjects: - kind: ServiceAccount name: {{ template "consul.fullname" . }}-connect-injector namespace: {{ .Release.Namespace }} -{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/consul/templates/connect-inject-deployment.yaml b/charts/consul/templates/connect-inject-deployment.yaml index cd5ad9ddd2..5804cc988b 100644 --- a/charts/consul/templates/connect-inject-deployment.yaml +++ b/charts/consul/templates/connect-inject-deployment.yaml @@ -66,6 +66,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/login/acl-token" + {{- end }} {{- if .Values.global.tls.enabled }} - name: CONSUL_CACERT value: /consul/tls/ca/tls.crt @@ -80,12 +84,6 @@ spec: secretKeyRef: name: {{ .Values.connectInject.aclInjectToken.secretName }} key: {{ .Values.connectInject.aclInjectToken.secretKey }} - {{- else if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_HTTP_TOKEN - valueFrom: - secretKeyRef: - name: "{{ template "consul.fullname" . 
}}-connect-inject-acl-token" - key: "token" {{- end }} - name: CONSUL_HTTP_ADDR {{- if .Values.global.tls.enabled }} @@ -216,6 +214,16 @@ spec: -default-consul-sidecar-cpu-request={{ $consulSidecarResources.requests.cpu }} \ {{- end }} {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane consul-logout + {{- end }} startupProbe: httpGet: path: /readyz/ready @@ -246,6 +254,9 @@ spec: - name: certs mountPath: /etc/connect-injector/certs readOnly: true + - mountPath: /consul/login + name: consul-data + readOnly: true {{- if .Values.global.tls.enabled }} {{- if .Values.global.tls.enableAutoEncrypt }} - name: consul-auto-encrypt-ca-cert @@ -264,6 +275,9 @@ spec: secret: defaultMode: 420 secretName: {{ template "consul.fullname" . }}-connect-inject-webhook-cert + - name: consul-data + emptyDir: + medium: "Memory" {{- if .Values.global.tls.enabled }} {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - name: consul-ca-cert @@ -285,16 +299,57 @@ spec: {{- end }} {{- if or (and .Values.global.acls.manageSystemACLs) (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} initContainers: + {{- if and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt }} + {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} + {{- end }} {{- if .Values.global.acls.manageSystemACLs }} - - name: injector-acl-init + - name: connect-injector-acl-init + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} image: {{ .Values.global.imageK8S }} + volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} command: - "/bin/sh" - "-ec" - | consul-k8s-control-plane acl-init \ - -secret-name="{{ template "consul.fullname" . }}-connect-inject-acl-token" \ - -k8s-namespace={{ .Release.Namespace }} + -component-name=connect-injector \ + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ + {{- else }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method \ + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -log-level={{ default .Values.global.logLevel .Values.connectInject.logLevel }} \ + -log-json={{ .Values.global.logJSON }} resources: requests: memory: "25Mi" @@ -303,9 +358,6 @@ spec: memory: "25Mi" cpu: "50m" {{- end }} - {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} - {{- include "consul.getAutoEncryptClientCA" . 
| nindent 6 }} - {{- end }} {{- end }} {{- if .Values.connectInject.priorityClassName }} priorityClassName: {{ .Values.connectInject.priorityClassName | quote }} diff --git a/charts/consul/templates/connect-inject-serviceaccount.yaml b/charts/consul/templates/connect-inject-serviceaccount.yaml index 250b23d6c3..ea2352c7ac 100644 --- a/charts/consul/templates/connect-inject-serviceaccount.yaml +++ b/charts/consul/templates/connect-inject-serviceaccount.yaml @@ -17,7 +17,7 @@ metadata: {{- with .Values.global.imagePullSecrets }} imagePullSecrets: {{- range . }} - - name: {{ .name }} +- name: {{ .name }} {{- end }} {{- end }} {{- end }} diff --git a/charts/consul/templates/controller-clusterrole.yaml b/charts/consul/templates/controller-clusterrole.yaml index 45fa8d8458..e2522a2eae 100644 --- a/charts/consul/templates/controller-clusterrole.yaml +++ b/charts/consul/templates/controller-clusterrole.yaml @@ -57,15 +57,6 @@ rules: - get - list - update -{{- if .Values.global.acls.manageSystemACLs }} -- apiGroups: [""] - resources: - - secrets - resourceNames: - - {{ template "consul.fullname" . }}-controller-acl-token - verbs: - - get -{{- end }} {{- if .Values.global.enablePodSecurityPolicies }} - apiGroups: ["policy"] resources: ["podsecuritypolicies"] diff --git a/charts/consul/templates/controller-deployment.yaml b/charts/consul/templates/controller-deployment.yaml index e5ed0d74f5..29e5aa5bda 100644 --- a/charts/consul/templates/controller-deployment.yaml +++ b/charts/consul/templates/controller-deployment.yaml @@ -47,16 +47,57 @@ spec: spec: {{- if or .Values.global.acls.manageSystemACLs (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} initContainers: + {{- if and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt }} + {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} + {{- end }} {{- if .Values.global.acls.manageSystemACLs }} - name: controller-acl-init + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} image: {{ .Values.global.imageK8S }} + volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} command: - "/bin/sh" - "-ec" - | consul-k8s-control-plane acl-init \ - -secret-name="{{ template "consul.fullname" . }}-controller-acl-token" \ - -k8s-namespace={{ .Release.Namespace }} + -component-name=controller \ + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ + {{- else }} + -acl-auth-method={{ template "consul.fullname" . 
}}-k8s-component-auth-method \ + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -log-level={{ default .Values.global.logLevel .Values.controller.logLevel }} \ + -log-json={{ .Values.global.logJSON }} resources: requests: memory: "25Mi" @@ -65,9 +106,6 @@ spec: memory: "25Mi" cpu: "50m" {{- end }} - {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} - {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} - {{- end }} {{- end }} containers: - command: @@ -98,7 +136,21 @@ spec: -consul-cross-namespace-acl-policy=cross-namespace-policy \ {{- end }} {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane consul-logout + {{- end }} env: + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/login/acl-token" + {{- end }} - name: HOST_IP valueFrom: fieldRef: @@ -110,13 +162,6 @@ spec: name: {{ .Values.controller.aclToken.secretName }} key: {{ .Values.controller.aclToken.secretKey }} {{- end }} - {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_HTTP_TOKEN - valueFrom: - secretKeyRef: - name: "{{ template "consul.fullname" . }}-controller-acl-token" - key: "token" - {{- end}} {{- if .Values.global.tls.enabled }} - name: CONSUL_CACERT value: /consul/tls/ca/tls.crt @@ -138,6 +183,9 @@ spec: {{- toYaml . | nindent 12 }} {{- end }} volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: true - mountPath: /tmp/controller-webhook/certs name: cert readOnly: true @@ -175,6 +223,9 @@ spec: medium: "Memory" {{- end }} {{- end }} + - name: consul-data + emptyDir: + medium: "Memory" serviceAccountName: {{ template "consul.fullname" . }}-controller {{- if .Values.controller.nodeSelector }} nodeSelector: diff --git a/charts/consul/templates/ingress-gateways-deployment.yaml b/charts/consul/templates/ingress-gateways-deployment.yaml index d520bd9845..bbd2388959 100644 --- a/charts/consul/templates/ingress-gateways-deployment.yaml +++ b/charts/consul/templates/ingress-gateways-deployment.yaml @@ -9,6 +9,17 @@ {{- $defaults := .Values.ingressGateways.defaults }} {{- $names := dict }} +{{- /* Check if gateway names are unique. */ -}} +{{- $gateways := .Values.ingressGateways.gateways }} +{{- range $outerIngressIndex, $outerIngressVal := $gateways }} + +{{- range $innerIngressIndex, $innerIngressVal := $gateways }} +{{- if (and (ne $outerIngressIndex $innerIngressIndex) (eq $outerIngressVal.name $innerIngressVal.name)) }} +{{ fail (cat "ingress gateways must have unique names but found duplicate name" $innerIngressVal.name) }} +{{ end -}} +{{ end -}} +{{ end -}} + {{- range .Values.ingressGateways.gateways }} {{- $service := .service }} @@ -140,8 +151,8 @@ spec: {{- if (and $root.Values.global.tls.enabled $root.Values.global.tls.enableAutoEncrypt) }} {{- include "consul.getAutoEncryptClientCA" $root | nindent 8 }} {{- end }} - # service-init registers the ingress gateway service. - - name: service-init + # ingress-gateway-init registers the ingress gateway service with Consul. 
+ - name: ingress-gateway-init image: {{ $root.Values.global.imageK8S }} env: - name: HOST_IP @@ -171,9 +182,14 @@ spec: - | {{- if $root.Values.global.acls.manageSystemACLs }} consul-k8s-control-plane acl-init \ - -secret-name="{{ template "consul.fullname" $root }}-{{ .name }}-ingress-gateway-acl-token" \ - -k8s-namespace={{ $root.Release.Namespace }} \ - -token-sink-file=/consul/service/acl-token + -component-name=ingress-gateway/{{ template "consul.fullname" $root }}-{{ .name }} \ + -acl-auth-method={{ template "consul.fullname" $root }}-k8s-component-auth-method \ + {{- if $root.Values.global.adminPartitions.enabled }} + -partition={{ $root.Values.global.adminPartitions.name }} \ + {{- end }} + -token-sink-file=/consul/service/acl-token \ + -log-level={{ default $root.Values.global.logLevel }} \ + -log-json={{ $root.Values.global.logJSON }} {{ end }} {{- $serviceType := (default $defaults.service.type $service.type) }} @@ -306,6 +322,9 @@ spec: volumeMounts: - name: consul-bin mountPath: /consul-bin + - name: consul-service + mountPath: /consul/service + readOnly: true {{- if $root.Values.global.tls.enabled }} {{- if $root.Values.global.tls.enableAutoEncrypt }} - name: consul-auto-encrypt-ca-cert @@ -329,11 +348,8 @@ spec: fieldRef: fieldPath: metadata.name {{- if $root.Values.global.acls.manageSystemACLs }} - - name: CONSUL_HTTP_TOKEN - valueFrom: - secretKeyRef: - name: "{{ template "consul.fullname" $root }}-{{ .name }}-ingress-gateway-acl-token" - key: "token" + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/service/acl-token" {{- end}} {{- if $root.Values.global.tls.enabled }} - name: CONSUL_HTTP_ADDR @@ -399,6 +415,9 @@ spec: -partition={{ $root.Values.global.adminPartitions.name }} \ {{- end }} -id="${POD_NAME}" + {{- if $root.Values.global.acls.manageSystemACLs }} + - "/consul-bin/consul logout" + {{- end}} # consul-sidecar ensures the ingress gateway is always registered with # the local Consul agent, even if it loses the initial registration. diff --git a/charts/consul/templates/ingress-gateways-role.yaml b/charts/consul/templates/ingress-gateways-role.yaml index ab211eeed1..49e8486e58 100644 --- a/charts/consul/templates/ingress-gateways-role.yaml +++ b/charts/consul/templates/ingress-gateways-role.yaml @@ -37,7 +37,7 @@ rules: resources: - secrets resourceNames: - - {{ template "consul.fullname" $root }}-{{ .name }}-ingress-gateway-acl-token + - {{ template "consul.fullname" $root }}-{{ .name }}-acl-token verbs: - get {{- end }} diff --git a/charts/consul/templates/mesh-gateway-clusterrole.yaml b/charts/consul/templates/mesh-gateway-clusterrole.yaml index 2df2ba5343..b951418b26 100644 --- a/charts/consul/templates/mesh-gateway-clusterrole.yaml +++ b/charts/consul/templates/mesh-gateway-clusterrole.yaml @@ -19,15 +19,6 @@ rules: verbs: - use {{- end }} -{{- if .Values.global.acls.manageSystemACLs }} - - apiGroups: [""] - resources: - - secrets - resourceNames: - - {{ template "consul.fullname" . }}-mesh-gateway-acl-token - verbs: - - get -{{- end }} {{- if eq .Values.meshGateway.wanAddress.source "Service" }} - apiGroups: [""] resources: diff --git a/charts/consul/templates/mesh-gateway-deployment.yaml b/charts/consul/templates/mesh-gateway-deployment.yaml index 8a0a05caba..4ce13acb6d 100644 --- a/charts/consul/templates/mesh-gateway-deployment.yaml +++ b/charts/consul/templates/mesh-gateway-deployment.yaml @@ -121,26 +121,26 @@ spec: {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} {{- include "consul.getAutoEncryptClientCA" . 
| nindent 8 }} {{- end }} - # service-init registers the mesh gateway service. - - name: service-init + - name: mesh-gateway-init image: {{ .Values.global.imageK8S }} env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR {{- if .Values.global.tls.enabled }} - - name: CONSUL_HTTP_ADDR - value: https://$(HOST_IP):8501 - - name: CONSUL_CACERT - value: /consul/tls/ca/tls.crt + value: https://$(HOST_IP):8501 {{- else }} - - name: CONSUL_HTTP_ADDR - value: http://$(HOST_IP):8500 + value: http://$(HOST_IP):8500 {{- end }} command: - "/bin/sh" @@ -148,9 +148,19 @@ spec: - | {{- if .Values.global.acls.manageSystemACLs }} consul-k8s-control-plane acl-init \ - -secret-name="{{ template "consul.fullname" . }}-mesh-gateway-acl-token" \ - -k8s-namespace={{ .Release.Namespace }} \ - -token-sink-file=/consul/service/acl-token + -component-name=mesh-gateway \ + -token-sink-file=/consul/service/acl-token \ + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ + {{- else }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method \ + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -log-level={{ default .Values.global.logLevel }} \ + -log-json={{ .Values.global.logJSON }} {{ end }} {{- $source := .Values.meshGateway.wanAddress.source }} @@ -258,6 +268,9 @@ spec: {{- end }} {{- end }} volumeMounts: + - mountPath: /consul/service + name: consul-service + readOnly: true - name: consul-bin mountPath: /consul-bin {{- if .Values.global.tls.enabled }} @@ -285,12 +298,9 @@ spec: fieldPath: spec.nodeName {{- end }} {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_HTTP_TOKEN - valueFrom: - secretKeyRef: - name: "{{ template "consul.fullname" . }}-mesh-gateway-acl-token" - key: "token" - {{- end}} + - name: CONSUL_HTTP_TOKEN_FILE + value: /consul/service/acl-token + {{- end }} {{- if .Values.global.tls.enabled }} - name: CONSUL_HTTP_ADDR value: https://$(HOST_IP):8501 @@ -337,7 +347,13 @@ spec: lifecycle: preStop: exec: - command: ["/bin/sh", "-ec", "/consul-bin/consul services deregister -id=\"{{ .Values.meshGateway.consulServiceName }}\""] + command: + - "/bin/sh" + - "-ec" + - "/consul-bin/consul services deregister -id=\"{{ .Values.meshGateway.consulServiceName }}\"" + {{- if .Values.global.acls.manageSystemACLs }} + - "/consul-bin/consul logout" + {{- end}} # consul-sidecar ensures the mesh gateway is always registered with # the local Consul agent, even if it loses the initial registration. 
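Illustrative sketch (not part of the patch): with global.acls.manageSystemACLs=true, TLS, federation, and admin partitions disabled, and default log settings, the ACL portion of the mesh-gateway-init container above renders roughly as below. The image tag and the RELEASE-NAME-consul name prefix are placeholder assumptions, and the unchanged WAN-address/service-registration steps are omitted.

  initContainers:
    - name: mesh-gateway-init
      image: hashicorp/consul-k8s-control-plane:latest   # placeholder for .Values.global.imageK8S
      env:
        - name: HOST_IP
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: CONSUL_HTTP_ADDR
          value: http://$(HOST_IP):8500
      command:
        - "/bin/sh"
        - "-ec"
        - |
          # Log in through the Kubernetes component auth method and write the
          # token to the sink file that the gateway container reads via
          # CONSUL_HTTP_TOKEN_FILE=/consul/service/acl-token.
          consul-k8s-control-plane acl-init \
            -component-name=mesh-gateway \
            -token-sink-file=/consul/service/acl-token \
            -acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method \
            -log-level=info \
            -log-json=false
          # ...followed by the WAN address resolution and
          # "consul services register" steps from the template.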
diff --git a/charts/consul/templates/partition-init-job.yaml b/charts/consul/templates/partition-init-job.yaml index fe5f26fd86..acc802b16a 100644 --- a/charts/consul/templates/partition-init-job.yaml +++ b/charts/consul/templates/partition-init-job.yaml @@ -2,6 +2,7 @@ {{- if (and .Values.global.adminPartitions.enabled (not $serverEnabled) (ne .Values.global.adminPartitions.name "default")) }} {{- template "consul.reservedNamesFailer" (list .Values.global.adminPartitions.name "global.adminPartitions.name") }} {{- if and (not .Values.externalServers.enabled) (ne .Values.global.adminPartitions.name "default") }}{{ fail "externalServers.enabled needs to be true and configured to create a non-default partition." }}{{ end -}} +{{- if and .Values.global.secretsBackend.vault.enabled .Values.global.acls.manageSystemACLs (not .Values.global.secretsBackend.vault.adminPartitionsRole) }}{{ fail "global.secretsBackend.vault.adminPartitionsRole is required when global.secretsBackend.vault.enabled and global.acls.manageSystemACLs are true." }}{{ end -}} apiVersion: batch/v1 kind: Job metadata: @@ -28,11 +29,35 @@ spec: component: partition-init annotations: "consul.hashicorp.com/connect-inject": "false" + {{- if (and .Values.global.secretsBackend.vault.enabled (or .Values.global.tls.enabled .Values.global.acls.manageSystemACLs)) }} + "vault.hashicorp.com/agent-pre-populate-only": "true" + "vault.hashicorp.com/agent-inject": "true" + {{- if .Values.global.acls.manageSystemACLs }} + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.adminPartitionsRole }} + {{- if .Values.global.acls.bootstrapToken.secretName }} + {{- with .Values.global.acls.bootstrapToken }} + "vault.hashicorp.com/agent-inject-secret-bootstrap-token": "{{ .secretName }}" + "vault.hashicorp.com/agent-inject-template-bootstrap-token": {{ template "consul.vaultSecretTemplate" . }} + {{- end }} + {{- end }} + {{- else }} + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulCARole }} + {{- end }} + "vault.hashicorp.com/agent-inject-secret-serverca.crt": {{ .Values.global.tls.caCert.secretName }} + "vault.hashicorp.com/agent-inject-template-serverca.crt": {{ template "consul.serverTLSCATemplate" . }} + {{- if and .Values.global.secretsBackend.vault.ca.secretName .Values.global.secretsBackend.vault.ca.secretKey }} + "vault.hashicorp.com/agent-extra-secret": "{{ .Values.global.secretsBackend.vault.ca.secretName }}" + "vault.hashicorp.com/ca-cert": "/vault/custom/{{ .Values.global.secretsBackend.vault.ca.secretKey }}" + {{- end }} + {{- if .Values.global.secretsBackend.vault.agentAnnotations }} + {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . | nindent 8 | trim }} + {{- end }} + {{- end }} spec: restartPolicy: Never serviceAccountName: {{ template "consul.fullname" . 
}}-partition-init {{- if .Values.global.tls.enabled }} - {{- if not .Values.externalServers.useSystemRoots }} + {{- if not (or .Values.externalServers.useSystemRoots .Values.global.secretsBackend.vault.enabled) }} volumes: - name: consul-ca-cert secret: @@ -55,14 +80,19 @@ spec: fieldRef: fieldPath: metadata.namespace {{- if (and .Values.global.acls.bootstrapToken.secretName .Values.global.acls.bootstrapToken.secretKey) }} + {{- if .Values.global.secretsBackend.vault.enabled }} + - name: CONSUL_HTTP_TOKEN_FILE + value: /vault/secrets/bootstrap-token + {{- else }} - name: CONSUL_HTTP_TOKEN valueFrom: secretKeyRef: name: {{ .Values.global.acls.bootstrapToken.secretName }} key: {{ .Values.global.acls.bootstrapToken.secretKey }} {{- end }} + {{- end }} {{- if .Values.global.tls.enabled }} - {{- if not .Values.externalServers.useSystemRoots }} + {{- if not (or .Values.externalServers.useSystemRoots .Values.global.secretsBackend.vault.enabled) }} volumeMounts: - name: consul-ca-cert mountPath: /consul/tls/ca @@ -86,10 +116,14 @@ spec: {{- if .Values.global.tls.enabled }} -use-https \ {{- if not .Values.externalServers.useSystemRoots }} - -consul-ca-cert=/consul/tls/ca/tls.crt \ + {{- if .Values.global.secretsBackend.vault.enabled }} + -ca-file=/vault/secrets/serverca.crt \ + {{- else }} + -ca-file=/consul/tls/ca/tls.crt \ + {{- end }} {{- end }} {{- if .Values.externalServers.tlsServerName }} - -consul-tls-server-name={{ .Values.externalServers.tlsServerName }} \ + -tls-server-name={{ .Values.externalServers.tlsServerName }} \ {{- end }} {{- end }} -partition-name={{ .Values.global.adminPartitions.name }} diff --git a/charts/consul/templates/server-acl-init-job.yaml b/charts/consul/templates/server-acl-init-job.yaml index 27bc8c1bf0..7b4d46577b 100644 --- a/charts/consul/templates/server-acl-init-job.yaml +++ b/charts/consul/templates/server-acl-init-job.yaml @@ -4,8 +4,10 @@ {{- if and .Values.global.acls.createReplicationToken (not .Values.global.acls.manageSystemACLs) }}{{ fail "if global.acls.createReplicationToken is true, global.acls.manageSystemACLs must be true" }}{{ end -}} {{- if .Values.global.bootstrapACLs }}{{ fail "global.bootstrapACLs was removed, use global.acls.manageSystemACLs instead" }}{{ end -}} {{- if .Values.global.acls.manageSystemACLs }} -{{- if and .Values.global.secretsBackend.vault.enabled .Values.global.acls.replicationToken.secretName (not .Values.global.secretsBackend.vault.manageSystemACLsRole) }}{{ fail "global.secretsBackend.vault.manageSystemACLsRole must be set if global.secretsBackend.vault.enabled is true and global.acls.replicationToken is provided" }}{{ end -}} +{{- if or (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.acls.bootstrapToken.secretKey)) (and .Values.global.acls.bootstrapToken.secretKey (not .Values.global.acls.bootstrapToken.secretName))}}{{ fail "both global.acls.bootstrapToken.secretKey and global.acls.bootstrapToken.secretName must be set if one of them is provided" }}{{ end -}} {{- if or (and .Values.global.acls.replicationToken.secretName (not .Values.global.acls.replicationToken.secretKey)) (and .Values.global.acls.replicationToken.secretKey (not .Values.global.acls.replicationToken.secretName))}}{{ fail "both global.acls.replicationToken.secretKey and global.acls.replicationToken.secretName must be set if one of them is provided" }}{{ end -}} +{{- if (and .Values.global.secretsBackend.vault.enabled (and (not .Values.global.acls.bootstrapToken.secretName) (not .Values.global.acls.replicationToken.secretName ))) 
}}{{fail "global.acls.bootstrapToken or global.acls.replicationToken must be provided when global.secretsBackend.vault.enabled and global.acls.manageSystemACLs are true" }}{{ end -}} +{{- if (and .Values.global.secretsBackend.vault.enabled (not .Values.global.secretsBackend.vault.manageSystemACLsRole)) }}{{fail "global.secretsBackend.vault.manageSystemACLsRole is required when global.secretsBackend.vault.enabled and global.acls.manageSystemACLs are true" }}{{ end -}} {{- /* We don't render this job when server.updatePartition > 0 because that means a server rollout is in progress and this job won't complete unless the rollout is finished (which won't happen until the partition is 0). @@ -36,11 +38,25 @@ spec: component: server-acl-init annotations: "consul.hashicorp.com/connect-inject": "false" - {{- if (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) }} + {{- if .Values.global.secretsBackend.vault.enabled }} "vault.hashicorp.com/agent-pre-populate-only": "true" "vault.hashicorp.com/agent-inject": "true" + {{- if .Values.global.acls.bootstrapToken.secretName }} + {{- with .Values.global.acls.bootstrapToken }} + "vault.hashicorp.com/agent-inject-secret-bootstrap-token": "{{ .secretName }}" + "vault.hashicorp.com/agent-inject-template-bootstrap-token": {{ template "consul.vaultSecretTemplate" . }} + {{- end }} + {{- end }} + {{- if .Values.global.acls.partitionToken.secretName }} + {{- with .Values.global.acls.partitionToken }} + "vault.hashicorp.com/agent-inject-secret-partition-token": "{{ .secretName }}" + "vault.hashicorp.com/agent-inject-template-partition-token": {{ template "consul.vaultSecretTemplate" . }} + {{- end }} + {{- end }} + {{- if .Values.global.tls.enabled }} "vault.hashicorp.com/agent-inject-secret-serverca.crt": {{ .Values.global.tls.caCert.secretName }} "vault.hashicorp.com/agent-inject-template-serverca.crt": {{ template "consul.serverTLSCATemplate" . }} + {{- end }} {{- if .Values.global.secretsBackend.vault.manageSystemACLsRole }} "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.manageSystemACLsRole }} {{- else if .Values.global.tls.enabled }} @@ -61,7 +77,7 @@ spec: spec: restartPolicy: Never serviceAccountName: {{ template "consul.fullname" . 
}}-server-acl-init - {{- if (or .Values.global.tls.enabled .Values.global.acls.replicationToken.secretName (and .Values.global.acls.bootstrapToken.secretName .Values.global.acls.bootstrapToken.secretKey)) }} + {{- if (or .Values.global.tls.enabled .Values.global.acls.replicationToken.secretName .Values.global.acls.bootstrapToken.secretName) }} volumes: {{- if and .Values.global.tls.enabled (not .Values.global.secretsBackend.vault.enabled) }} - name: consul-ca-cert @@ -75,7 +91,7 @@ spec: - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} path: tls.crt {{- end }} - {{- if (and .Values.global.acls.bootstrapToken.secretName .Values.global.acls.bootstrapToken.secretKey) }} + {{- if (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.secretsBackend.vault.enabled)) }} - name: bootstrap-token secret: secretName: {{ .Values.global.acls.bootstrapToken.secretName }} @@ -99,14 +115,14 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - {{- if (or .Values.global.tls.enabled .Values.global.acls.replicationToken.secretName (and .Values.global.acls.bootstrapToken.secretName .Values.global.acls.bootstrapToken.secretKey)) }} + {{- if (or .Values.global.tls.enabled .Values.global.acls.replicationToken.secretName .Values.global.acls.bootstrapToken.secretName) }} volumeMounts: {{- if and .Values.global.tls.enabled (not .Values.global.secretsBackend.vault.enabled) }} - name: consul-ca-cert mountPath: /consul/tls/ca readOnly: true {{- end }} - {{- if (and .Values.global.acls.bootstrapToken.secretName .Values.global.acls.bootstrapToken.secretKey) }} + {{- if (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.secretsBackend.vault.enabled)) }} - name: bootstrap-token mountPath: /consul/acl/tokens readOnly: true @@ -127,6 +143,7 @@ spec: -log-json={{ .Values.global.logJSON }} \ -resource-prefix=${CONSUL_FULLNAME} \ -k8s-namespace={{ .Release.Namespace }} \ + -set-server-tokens={{ $serverEnabled }} \ {{- if .Values.externalServers.enabled }} {{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} @@ -158,7 +175,7 @@ spec: {{- end }} {{- if .Values.syncCatalog.enabled }} - -create-sync-token=true \ + -sync-catalog=true \ {{- if .Values.syncCatalog.consulNodeName }} -sync-consul-node-name={{ .Values.syncCatalog.consulNodeName }} \ {{- end }} @@ -172,14 +189,18 @@ spec: {{- end }} {{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} - -create-inject-token=true \ + -connect-inject=true \ {{- if and .Values.externalServers.enabled .Values.externalServers.k8sAuthMethodHost }} - -inject-auth-method-host={{ .Values.externalServers.k8sAuthMethodHost }} \ + -auth-method-host={{ .Values.externalServers.k8sAuthMethodHost }} \ + {{- end }} {{- end }} + + {{- if .Values.global.federation.k8sAuthMethodHost }} + -auth-method-host={{ .Values.global.federation.k8sAuthMethodHost }} \ {{- end }} {{- if .Values.meshGateway.enabled }} - -create-mesh-gateway-token=true \ + -mesh-gateway=true \ {{- end }} {{- if .Values.ingressGateways.enabled }} @@ -225,11 +246,11 @@ spec: {{- end }} {{- if .Values.client.snapshotAgent.enabled }} - -create-snapshot-agent-token=true \ + -snapshot-agent=true \ {{- end }} {{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | 
toString) "-") .Values.global.enabled)) }} - -create-client-token=false \ + -client=false \ {{- end }} {{- if .Values.global.acls.createReplicationToken }} @@ -240,22 +261,30 @@ spec: -federation=true \ {{- end }} - {{- if (and .Values.global.acls.bootstrapToken.secretName .Values.global.acls.bootstrapToken.secretKey) }} + {{- if .Values.global.acls.bootstrapToken.secretName }} + {{- if .Values.global.secretsBackend.vault.enabled }} + -bootstrap-token-file=/vault/secrets/bootstrap-token \ + {{- else }} -bootstrap-token-file=/consul/acl/tokens/bootstrap-token \ - {{- else if .Values.global.acls.replicationToken.secretName }} + {{- end }} + {{- end }} + {{- if .Values.global.acls.replicationToken.secretName }} {{- if .Values.global.secretsBackend.vault.enabled }} -acl-replication-token-file=/vault/secrets/replication-token \ {{- else }} -acl-replication-token-file=/consul/acl/tokens/acl-replication-token \ {{- end }} {{- end }} + {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.acls.partitionToken.secretName }} + -partition-token-file=/vault/secrets/partition-token \ + {{- end }} {{- if .Values.controller.enabled }} - -create-controller-token=true \ + -controller=true \ {{- end }} {{- if .Values.apiGateway.enabled }} - -create-api-gateway-token=true \ + -api-gateway-controller=true \ {{- end }} {{- if .Values.global.enableConsulNamespaces }} diff --git a/charts/consul/templates/server-acl-init-role.yaml b/charts/consul/templates/server-acl-init-role.yaml index e828ae9b3f..eb7b6a928e 100644 --- a/charts/consul/templates/server-acl-init-role.yaml +++ b/charts/consul/templates/server-acl-init-role.yaml @@ -13,28 +13,26 @@ metadata: release: {{ .Release.Name }} component: server-acl-init rules: - - apiGroups: [""] - resources: - - secrets - verbs: - - create - - get -{{- if .Values.connectInject.enabled }} - - apiGroups: [""] - resources: - - serviceaccounts - resourceNames: - - {{ template "consul.fullname" . }}-connect-injector - verbs: - - get -{{- end }} +- apiGroups: [ "" ] + resources: + - secrets + verbs: + - create + - get +- apiGroups: [ "" ] + resources: + - serviceaccounts + resourceNames: + - {{ template "consul.fullname" . }}-auth-method + verbs: + - get {{- if .Values.global.enablePodSecurityPolicies }} - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - resourceNames: - - {{ template "consul.fullname" . }}-server-acl-init - verbs: - - use +- apiGroups: [ "policy" ] + resources: [ "podsecuritypolicies" ] + resourceNames: + - {{ template "consul.fullname" . }}-server-acl-init + verbs: + - use {{- end }} {{- end }} {{- end }} diff --git a/charts/consul/templates/server-statefulset.yaml b/charts/consul/templates/server-statefulset.yaml index 0d669f3a81..d83e9d5c50 100644 --- a/charts/consul/templates/server-statefulset.yaml +++ b/charts/consul/templates/server-statefulset.yaml @@ -15,6 +15,8 @@ {{- if (and (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) (not .Values.global.secretsBackend.vault.consulCARole)) }}{{ fail "global.secretsBackend.vault.consulCARole must be provided if global.secretsBackend.vault.enabled=true and global.tls.enabled=true" }}{{ end -}} {{- if (and .Values.global.enterpriseLicense.secretName (not .Values.global.enterpriseLicense.secretKey)) }}{{fail "enterpriseLicense.secretKey and secretName must both be specified." 
}}{{ end -}} {{- if (and (not .Values.global.enterpriseLicense.secretName) .Values.global.enterpriseLicense.secretKey) }}{{fail "enterpriseLicense.secretKey and secretName must both be specified." }}{{ end -}} +{{- if (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.acls.bootstrapToken.secretKey)) }}{{fail "both global.acls.bootstrapToken.secretKey and global.acls.bootstrapToken.secretName must be set if one of them is provided." }}{{ end -}} +{{- if (and (not .Values.global.acls.bootstrapToken.secretName) .Values.global.acls.bootstrapToken.secretKey) }}{{fail "both global.acls.bootstrapToken.secretKey and global.acls.bootstrapToken.secretName must be set if one of them is provided." }}{{ end -}} # StatefulSet to run the actual Consul server cluster. apiVersion: apps/v1 kind: StatefulSet @@ -81,6 +83,10 @@ spec: "vault.hashicorp.com/agent-inject-secret-replication-token-config.hcl": "{{ .Values.global.acls.replicationToken.secretName }}" "vault.hashicorp.com/agent-inject-template-replication-token-config.hcl": {{ template "consul.vaultReplicationTokenConfigTemplate" . }} {{- end }} + {{- if (and .Values.global.acls.manageSystemACLs .Values.global.acls.bootstrapToken.secretName) }} + "vault.hashicorp.com/agent-inject-secret-bootstrap-token-config.hcl": "{{ .Values.global.acls.bootstrapToken.secretName }}" + "vault.hashicorp.com/agent-inject-template-bootstrap-token-config.hcl": {{ template "consul.vaultBootstrapTokenConfigTemplate" . }} + {{- end }} {{- if .Values.global.secretsBackend.vault.agentAnnotations }} {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . | nindent 8 | trim }} {{- end }} @@ -237,6 +243,13 @@ spec: value: /consul/license/{{ .Values.global.enterpriseLicense.secretKey }} {{- end }} {{- end }} + {{- if and (not .Values.global.secretsBackend.vault.enabled) .Values.global.acls.bootstrapToken.secretName }} + - name: ACL_BOOTSTRAP_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.global.acls.bootstrapToken.secretName | quote }} + key: {{ .Values.global.acls.bootstrapToken.secretKey | quote }} + {{- end }} {{- if (and .Values.global.acls.replicationToken.secretName .Values.global.acls.replicationToken.secretKey (not .Values.global.secretsBackend.vault.enabled)) }} - name: ACL_REPLICATION_TOKEN valueFrom: @@ -250,7 +263,7 @@ spec: - "-ec" - | CONSUL_FULLNAME="{{template "consul.fullname" . }}" - + {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.gossipEncryption.secretName }} GOSSIP_KEY=`cat /vault/secrets/gossip.txt` {{- end }} @@ -290,14 +303,6 @@ spec: {{- end }} -client=0.0.0.0 \ -config-dir=/consul/config \ - {{- /* Always include the extraVolumes at the end so that users can - override other Consul settings. The last -config-dir takes - precedence. 
*/}} - {{- range .Values.server.extraVolumes }} - {{- if .load }} - -config-dir=/consul/userconfig/{{ .name }} \ - {{- end }} - {{- end }} -datacenter={{ .Values.global.datacenter }} \ -data-dir=/consul/data \ -domain={{ .Values.global.domain }} \ @@ -337,6 +342,19 @@ spec: {{- if (and .Values.dns.enabled .Values.dns.enableRedirection) }} $recursor_flags \ {{- end }} + {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.acls.bootstrapToken.secretName }} + -config-file=/vault/secrets/bootstrap-token-config.hcl \ + {{- else if (and (not .Values.global.secretsBackend.vault.enabled) .Values.global.acls.bootstrapToken.secretName) }} + -hcl="acl { tokens { initial_management = \"${ACL_BOOTSTRAP_TOKEN}\" } }" \ + {{- end }} + {{- /* Always include the extraVolumes at the end so that users can + override other Consul settings. The last -config-dir takes + precedence. */}} + {{- range .Values.server.extraVolumes }} + {{- if .load }} + -config-dir=/consul/userconfig/{{ .name }} \ + {{- end }} + {{- end }} -config-file=/consul/extra-config/extra-from-values.json \ -server volumeMounts: diff --git a/charts/consul/templates/sync-catalog-clusterrole.yaml b/charts/consul/templates/sync-catalog-clusterrole.yaml index 5ceeb03d47..0b0837c0df 100644 --- a/charts/consul/templates/sync-catalog-clusterrole.yaml +++ b/charts/consul/templates/sync-catalog-clusterrole.yaml @@ -30,15 +30,6 @@ rules: - nodes verbs: - get -{{- if .Values.global.acls.manageSystemACLs }} - - apiGroups: [""] - resources: - - secrets - resourceNames: - - {{ template "consul.fullname" . }}-catalog-sync-acl-token - verbs: - - get -{{- end }} {{- if .Values.global.enablePodSecurityPolicies }} - apiGroups: ["policy"] resources: ["podsecuritypolicies"] diff --git a/charts/consul/templates/sync-catalog-deployment.yaml b/charts/consul/templates/sync-catalog-deployment.yaml index 2aedc54460..ba6c347b94 100644 --- a/charts/consul/templates/sync-catalog-deployment.yaml +++ b/charts/consul/templates/sync-catalog-deployment.yaml @@ -49,8 +49,11 @@ spec: {{- end }} spec: serviceAccountName: {{ template "consul.fullname" . }}-sync-catalog - {{- if .Values.global.tls.enabled }} volumes: + - name: consul-data + emptyDir: + medium: "Memory" + {{- if .Values.global.tls.enabled }} {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - name: consul-ca-cert secret: @@ -70,9 +73,13 @@ spec: {{- end }} {{- end }} containers: - - name: consul-sync-catalog + - name: sync-catalog image: "{{ default .Values.global.imageK8S .Values.syncCatalog.image }}" env: + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/login/acl-token" + {{- end }} - name: HOST_IP valueFrom: fieldRef: @@ -88,13 +95,6 @@ spec: name: {{ .Values.syncCatalog.aclSyncToken.secretName }} key: {{ .Values.syncCatalog.aclSyncToken.secretKey }} {{- end }} - {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_HTTP_TOKEN - valueFrom: - secretKeyRef: - name: "{{ template "consul.fullname" . }}-catalog-sync-acl-token" - key: "token" - {{- end}} {{- if .Values.global.tls.enabled }} {{- if .Values.client.enabled }} - name: CONSUL_HTTP_ADDR @@ -114,16 +114,19 @@ spec: value: http://{{ template "consul.fullname" . 
}}-server:8500 {{- end }} {{- end }} - {{- if .Values.global.tls.enabled }} volumeMounts: - {{- if (and .Values.global.tls.enableAutoEncrypt $clientEnabled) }} + - mountPath: /consul/login + name: consul-data + readOnly: true + {{- if .Values.global.tls.enabled }} + {{- if and .Values.global.tls.enableAutoEncrypt $clientEnabled }} - name: consul-auto-encrypt-ca-cert {{- else }} - name: consul-ca-cert {{- end }} mountPath: /consul/tls/ca readOnly: true - {{- end }} + {{- end }} command: - "/bin/sh" - "-ec" @@ -188,6 +191,16 @@ spec: -consul-cross-namespace-acl-policy=cross-namespace-policy \ {{- end }} {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane consul-logout + {{- end }} livenessProbe: httpGet: path: /health/ready @@ -214,16 +227,57 @@ spec: {{- end }} {{- if or .Values.global.acls.manageSystemACLs (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt $clientEnabled) }} initContainers: + {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt $clientEnabled) }} + {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} + {{- end }} {{- if .Values.global.acls.manageSystemACLs }} - - name: sync-acl-init + - name: sync-catalog-acl-init + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} image: {{ .Values.global.imageK8S }} + volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} command: - "/bin/sh" - "-ec" - | consul-k8s-control-plane acl-init \ - -secret-name="{{ template "consul.fullname" . }}-catalog-sync-acl-token" \ - -k8s-namespace={{ .Release.Namespace }} + -component-name=sync-catalog \ + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ + {{- else }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method \ + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -log-level={{ default .Values.global.logLevel .Values.syncCatalog.logLevel }} \ + -log-json={{ .Values.global.logJSON }} resources: requests: memory: "25Mi" @@ -232,9 +286,6 @@ spec: memory: "25Mi" cpu: "50m" {{- end }} - {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt $clientEnabled) }} - {{- include "consul.getAutoEncryptClientCA" . 
| nindent 6 }} - {{- end }} {{- end }} {{- if .Values.syncCatalog.priorityClassName }} priorityClassName: {{ .Values.syncCatalog.priorityClassName | quote }} diff --git a/charts/consul/templates/terminating-gateways-deployment.yaml b/charts/consul/templates/terminating-gateways-deployment.yaml index 902329a74a..5a7abb2304 100644 --- a/charts/consul/templates/terminating-gateways-deployment.yaml +++ b/charts/consul/templates/terminating-gateways-deployment.yaml @@ -9,6 +9,22 @@ {{- $defaults := .Values.terminatingGateways.defaults }} {{- $names := dict }} +{{- $gateways := .Values.terminatingGateways.gateways }} +{{- range $outerTerminatingIndex, $outerTerminatingVal := $gateways }} + +{{- range $innerTerminatingIndex, $innerTerminatingVal := $gateways }} +{{- if (and (ne $outerTerminatingIndex $innerTerminatingIndex) (eq $outerTerminatingVal.name $innerTerminatingVal.name)) }} +{{ fail (cat "terminating gateways must have unique names but found duplicate name" $innerTerminatingVal.name) }} +{{ end -}} +{{ end -}} + +{{- range $outerIngressIndex, $outerIngressVal := $root.Values.ingressGateways.gateways }} +{{- if (eq $outerTerminatingVal.name $outerIngressVal.name) }} +{{ fail (cat "terminating gateways cannot have duplicate names of any ingress gateways but found duplicate name" $outerTerminatingVal.name) }} +{{ end -}} +{{ end -}} +{{ end -}} + {{- range .Values.terminatingGateways.gateways }} {{- if empty .name }} @@ -154,8 +170,8 @@ spec: {{- if (and $root.Values.global.tls.enabled $root.Values.global.tls.enableAutoEncrypt) }} {{- include "consul.getAutoEncryptClientCA" $root | nindent 8 }} {{- end }} - # service-init registers the terminating gateway service. - - name: service-init + # terminating-gateway-init registers the terminating gateway service with Consul. 
+ - name: terminating-gateway-init image: {{ $root.Values.global.imageK8S }} env: - name: HOST_IP @@ -179,15 +195,20 @@ spec: - name: CONSUL_HTTP_ADDR value: http://$(HOST_IP):8500 {{- end }} - command: + command: - "/bin/sh" - "-ec" - | {{- if $root.Values.global.acls.manageSystemACLs }} consul-k8s-control-plane acl-init \ - -secret-name="{{ template "consul.fullname" $root }}-{{ .name }}-terminating-gateway-acl-token" \ - -k8s-namespace={{ $root.Release.Namespace }} \ - -token-sink-file=/consul/service/acl-token + -component-name=terminating-gateway/{{ template "consul.fullname" $root }}-{{ .name }} \ + -acl-auth-method={{ template "consul.fullname" $root }}-k8s-component-auth-method \ + {{- if $root.Values.global.adminPartitions.enabled }} + -partition={{ $root.Values.global.adminPartitions.name }} \ + {{- end }} + -token-sink-file=/consul/service/acl-token \ + -log-level={{ default $root.Values.global.logLevel }} \ + -log-json={{ $root.Values.global.logJSON }} {{- end }} cat > /consul/service/service.hcl << EOF @@ -252,6 +273,9 @@ spec: volumeMounts: - name: consul-bin mountPath: /consul-bin + - mountPath: /consul/service + name: consul-service + readOnly: true {{- if $root.Values.global.tls.enabled }} {{- if $root.Values.global.tls.enableAutoEncrypt }} - name: consul-auto-encrypt-ca-cert @@ -280,12 +304,9 @@ spec: fieldRef: fieldPath: metadata.name {{- if $root.Values.global.acls.manageSystemACLs }} - - name: CONSUL_HTTP_TOKEN - valueFrom: - secretKeyRef: - name: "{{ template "consul.fullname" $root }}-{{ .name }}-terminating-gateway-acl-token" - key: "token" - {{- end}} + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/service/acl-token" + {{- end }} {{- if $root.Values.global.tls.enabled }} - name: CONSUL_HTTP_ADDR value: https://$(HOST_IP):8501 @@ -345,6 +366,9 @@ spec: -partition={{ $root.Values.global.adminPartitions.name }} \ {{- end }} -id="${POD_NAME}" + {{- if $root.Values.global.acls.manageSystemACLs }} + - "/consul-bin/consul logout" + {{- end}} # consul-sidecar ensures the terminating gateway is always registered with # the local Consul agent, even if it loses the initial registration. diff --git a/charts/consul/templates/terminating-gateways-role.yaml b/charts/consul/templates/terminating-gateways-role.yaml index 8852ffb90e..4ae280ca81 100644 --- a/charts/consul/templates/terminating-gateways-role.yaml +++ b/charts/consul/templates/terminating-gateways-role.yaml @@ -31,7 +31,7 @@ rules: resources: - secrets resourceNames: - - {{ template "consul.fullname" $root }}-{{ .name }}-terminating-gateway-acl-token + - {{ template "consul.fullname" $root }}-{{ .name }}-acl-token verbs: - get {{- end }} diff --git a/charts/consul/templates/tls-init-job.yaml b/charts/consul/templates/tls-init-job.yaml index ba75d94460..47dd6462b0 100644 --- a/charts/consul/templates/tls-init-job.yaml +++ b/charts/consul/templates/tls-init-job.yaml @@ -77,7 +77,9 @@ spec: -additional-dnsname="{{ template "consul.fullname" . }}-server" \ -additional-dnsname="*.{{ template "consul.fullname" . }}-server" \ -additional-dnsname="*.{{ template "consul.fullname" . }}-server.${NAMESPACE}" \ + -additional-dnsname="{{ template "consul.fullname" . }}-server.${NAMESPACE}" \ -additional-dnsname="*.{{ template "consul.fullname" . }}-server.${NAMESPACE}.svc" \ + -additional-dnsname="{{ template "consul.fullname" . }}-server.${NAMESPACE}.svc" \ -additional-dnsname="*.server.{{ .Values.global.datacenter }}.{{ .Values.global.domain }}" \ {{- range .Values.global.tls.serverAdditionalIPSANs }} -additional-ipaddress={{ . 
}} \ diff --git a/charts/consul/test/unit/api-gateway-controller-deployment.bats b/charts/consul/test/unit/api-gateway-controller-deployment.bats index 6810c5dde0..6e9686d12f 100755 --- a/charts/consul/test/unit/api-gateway-controller-deployment.bats +++ b/charts/consul/test/unit/api-gateway-controller-deployment.bats @@ -227,7 +227,7 @@ load _helpers --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.initContainers | length == 2' | tee /dev/stderr) + yq '.spec.template.spec.initContainers | length == 3' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -250,7 +250,7 @@ load _helpers #-------------------------------------------------------------------- # global.acls.manageSystemACLs -@test "apiGateway/Deployment: CONSUL_HTTP_TOKEN env variable created when global.acls.manageSystemACLs=true" { +@test "apiGateway/Deployment: consul-logout preStop hook is added when ACLs are enabled" { cd `chart_dir` local object=$(helm template \ -s templates/api-gateway-controller-deployment.yaml \ @@ -258,15 +258,31 @@ load _helpers --set 'apiGateway.image=foo' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].env[].name] ' | tee /dev/stderr) + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul logout"))' | tee /dev/stderr) + [ "${object}" = "true" ] +} - local actual=$(echo $object | - yq 'any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) - [ "${actual}" = "true" ] +@test "apiGateway/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[1].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} - local actual=$(echo $object | - yq 'map(select(test("CONSUL_HTTP_TOKEN"))) | length' | tee /dev/stderr) - [ "${actual}" = "1" ] +@test "apiGateway/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[1].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } @test "apiGateway/Deployment: init container is created when global.acls.manageSystemACLs=true" { @@ -277,7 +293,7 @@ load _helpers --set 'apiGateway.image=foo' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[1]' | tee /dev/stderr) local actual=$(echo $object | yq -r '.name' | tee /dev/stderr) @@ -286,6 +302,264 @@ load _helpers local actual=$(echo $object | yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "api-gateway-controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "api-gateway-controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "api-gateway-controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: init container for copy consul is created when global.acls.manageSystemACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "copy-consul-bin")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("cp"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.volumeMounts[0] | any(contains("consul-bin"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: volumeMount for copy consul is created on container when global.acls.manageSystemACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[0] | any(contains("consul-bin"))' | tee /dev/stderr) + + [ "${object}" = "true" ] +} + +@test "apiGateway/Deployment: volume for copy consul is created when global.acls.manageSystemACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[0] | any(contains("consul-bin"))' | tee /dev/stderr) + + [ "${object}" = "true" ] +} + +@test "apiGateway/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "get-auto-encrypt-client-ca" ] +} + +#-------------------------------------------------------------------- +# resources + +@test "apiGateway/Deployment: resources has default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + + [ $(echo "${actual}" | yq -r '.requests.memory') = "100Mi" ] + [ $(echo "${actual}" | yq -r '.requests.cpu') = "100m" ] + [ $(echo "${actual}" | yq -r '.limits.memory') = "100Mi" ] + [ $(echo "${actual}" | yq -r '.limits.cpu') = "100m" ] +} + +@test "apiGateway/Deployment: resources can be overridden" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'apiGateway.resources.foo=bar' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# init container resources + +@test "apiGateway/Deployment: init container has default resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + [ $(echo "${actual}" | yq -r '.requests.memory') = "25Mi" ] + [ $(echo "${actual}" | yq -r '.requests.cpu') = "50m" ] + [ $(echo "${actual}" | yq -r '.limits.memory') = "150Mi" ] + [ $(echo "${actual}" | yq -r '.limits.cpu') = "50m" ] +} + +@test "apiGateway/Deployment: init container resources can be set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'apiGateway.initCopyConsulContainer.resources.requests.memory=memory' \ + --set 'apiGateway.initCopyConsulContainer.resources.requests.cpu=cpu' \ + --set 'apiGateway.initCopyConsulContainer.resources.limits.memory=memory2' \ + --set 'apiGateway.initCopyConsulContainer.resources.limits.cpu=cpu2' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] } #-------------------------------------------------------------------- diff --git a/charts/consul/test/unit/auth-method-clusterrole.bats b/charts/consul/test/unit/auth-method-clusterrole.bats new file mode 100644 index 0000000000..935a448161 --- /dev/null +++ b/charts/consul/test/unit/auth-method-clusterrole.bats @@ -0,0 +1,20 @@ +#!/usr/bin/env bats + +load _helpers + +@test "auth-method/ClusterRole: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/auth-method-clusterrole.yaml \ + . +} + +@test "auth-method/ClusterRole: enabled with global.acls.manageSystemACLs true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/auth-method-clusterrole.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} \ No newline at end of file diff --git a/charts/consul/test/unit/auth-method-clusterrolebinding.bats b/charts/consul/test/unit/auth-method-clusterrolebinding.bats new file mode 100644 index 0000000000..dcb293ba14 --- /dev/null +++ b/charts/consul/test/unit/auth-method-clusterrolebinding.bats @@ -0,0 +1,20 @@ +#!/usr/bin/env bats + +load _helpers + +@test "auth-method/ClusterRoleBinding: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/auth-method-clusterrolebinding.yaml \ + . 
+} + +@test "auth-method/ClusterRoleBinding: enabled with global.acls.manageSystemACLs true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/auth-method-clusterrolebinding.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} \ No newline at end of file diff --git a/charts/consul/test/unit/auth-method-serviceaccount.bats b/charts/consul/test/unit/auth-method-serviceaccount.bats new file mode 100644 index 0000000000..9413a03291 --- /dev/null +++ b/charts/consul/test/unit/auth-method-serviceaccount.bats @@ -0,0 +1,41 @@ +#!/usr/bin/env bats + +load _helpers + +@test "auth-method/ServiceAccount: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/auth-method-serviceaccount.yaml \ + . +} + +@test "auth-method/ServiceAccount: enabled with global.acls.manageSystemACLs.enabled true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/auth-method-serviceaccount.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.imagePullSecrets + +@test "auth-method/ServiceAccount: can set image pull secrets" { + cd `chart_dir` + local object=$(helm template \ + -s templates/auth-method-serviceaccount.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.imagePullSecrets[0].name=my-secret' \ + --set 'global.imagePullSecrets[1].name=my-secret2' \ + . | tee /dev/stderr) + + local actual=$(echo "$object" | + yq -r '.imagePullSecrets[0].name' | tee /dev/stderr) + [ "${actual}" = "my-secret" ] + + local actual=$(echo "$object" | + yq -r '.imagePullSecrets[1].name' | tee /dev/stderr) + [ "${actual}" = "my-secret2" ] +} diff --git a/charts/consul/test/unit/client-daemonset.bats b/charts/consul/test/unit/client-daemonset.bats index eb1c2b5048..57204ef2a9 100755 --- a/charts/consul/test/unit/client-daemonset.bats +++ b/charts/consul/test/unit/client-daemonset.bats @@ -114,6 +114,7 @@ load _helpers -s templates/client-daemonset.yaml \ --set 'server.enabled=false' \ --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo' \ --set 'client.join[0]=1.1.1.1' \ --set 'client.join[1]=2.2.2.2' \ . | tee /dev/stderr | @@ -132,6 +133,7 @@ load _helpers -s templates/client-daemonset.yaml \ --set 'server.enabled=false' \ --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo' \ --set 'client.join[0]=provider=my-cloud config=val' \ . | tee /dev/stderr | yq -r '.spec.template.spec.containers[0].command') @@ -804,6 +806,57 @@ load _helpers [ "${actual}" = "true" ] } +@test "client/DaemonSet: Adds consul envvars CONSUL_HTTP_ADDR on acl-init init container when ACLs are enabled and tls is enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. 
| select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "https://RELEASE-NAME-consul-server.default.svc:8501" ] +} + +@test "client/DaemonSet: Adds consul envvars CONSUL_HTTP_ADDR on acl-init init container when ACLs are enabled and tls is not enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "http://RELEASE-NAME-consul-server.default.svc:8500" ] +} + +@test "client/DaemonSet: Does not add consul envvars CONSUL_CACERT on acl-init init container when ACLs are enabled and tls is not enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[0].env[] | select(.name == "CONSUL_CACERT")' | tee /dev/stderr) + + [ "${actual}" = "" ] +} + +@test "client/DaemonSet: Adds consul envvars CONSUL_CACERT on acl-init init container when ACLs are enabled and tls is enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr) + + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + @test "client/DaemonSet: both ACL and TLS init containers are created when global.tls.enabled=true and global.acls.manageSystemACLs=true" { cd `chart_dir` local has_acl_init_container=$(helm template \ @@ -1023,7 +1076,7 @@ load _helpers -s templates/client-daemonset.yaml \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.volumes[2].name == "aclconfig"' | tee /dev/stderr) + yq '.spec.template.spec.volumes[3].name == "aclconfig"' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1033,7 +1086,7 @@ load _helpers -s templates/client-daemonset.yaml \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].volumeMounts[2]' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].volumeMounts[3]' | tee /dev/stderr) local actual=$(echo $object | yq -r '.name' | tee /dev/stderr) @@ -1054,10 +1107,54 @@ load _helpers [ "${actual}" = "true" ] } -@test "client/DaemonSet: init container is created when global.acls.manageSystemACLs=true" { +@test "client/DaemonSet: init container is created when global.acls.manageSystemACLs=true and command args are properly set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "client-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + +local actual=$(echo $object | + yq -r '.command | any(contains("secret-name"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("k8s-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("component-name=client"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("init-type=\"client\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("acl-auth-method=\"RELEASE-NAME-consul-k8s-component-auth-method\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("log-level=info"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("log-json=false"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { cd `chart_dir` local object=$(helm template \ -s templates/client-daemonset.yaml \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | yq '.spec.template.spec.initContainers[] | select(.name == "client-acl-init")' | tee /dev/stderr) @@ -1065,6 +1162,306 @@ load _helpers local actual=$(echo $object | yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("secret-name"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("k8s-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("component-name=client"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("init-type=\"client\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("acl-auth-method=\"RELEASE-NAME-consul-k8s-component-auth-method\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("log-level=info"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("log-json=false"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=false' \ + . 
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/DaemonSet: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: consul-logout preStop hook is added when ACLs are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul logout"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: Adds consul login volume when ACLs are enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | yq '.spec.template.spec.volumes[2]' | tee /dev/stderr) + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "consul-data" ] + + local actual=$(echo $object | + yq -r '.emptyDir.medium' | tee /dev/stderr) + [ "${actual}" = "Memory" ] +} + +@test "client/DaemonSet: Adds consul login volumeMount to client container when ACLs are enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | yq '.spec.template.spec.containers[0].volumeMounts[2]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "consul-data" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "/consul/login" ] + + local actual=$(echo $object | + yq -r '.readOnly' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: Adds consul login volumeMount to acl-init init container when ACLs are enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | yq '.spec.template.spec.initContainers[0].volumeMounts[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "consul-data" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "/consul/login" ] + + local actual=$(echo $object | + yq -r '.readOnly' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/DaemonSet: Adds consul ca cert volumeMount to acl-init init container when ACLs and tls are enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + . 
| yq '.spec.template.spec.initContainers[0].volumeMounts[2]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "consul-ca-cert" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca" ] + + local actual=$(echo $object | + yq -r '.readOnly' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/DaemonSet: Does not add consul ca cert volumeMount to acl-init init container when tls is not enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=false' \ + . | yq '.spec.template.spec.initContainers[0].volumeMounts[2]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "null" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "null" ] + + local actual=$(echo $object | + yq -r '.readOnly' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "client/DaemonSet: fail when externalServers is enabled but the externalServers.hosts is not provided" { + cd `chart_dir` + run helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'externalServers.enabled=true' \ + --set 'server.enabled=false' \ + . + echo "status:$status" + [ "$status" -eq 1 ] + [[ "$output" =~ "externalServers.hosts must be set if externalServers.enabled is true" ]] +} + +@test "client/DaemonSet: server-address flag is set with hosts when externalServers.hosts are provided" { + cd `chart_dir` + local command=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'externalServers.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=foo' \ + --set 'externalServers.hosts[1]=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].command' | tee /dev/stderr) + + local actual=$(echo $command | jq -r ' . | any(contains("-server-address=\"foo\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $command | jq -r ' . | any(contains("-server-address=\"bar\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: tls-server-name flag is set when externalServers.tlsServerName is provided" { + cd `chart_dir` + local command=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'externalServers.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=computer' \ + --set 'externalServers.tlsServerName=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].command' | tee /dev/stderr) + + local actual=$(echo $command | jq -r ' . | any(contains("-tls-server-name=foo"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: tls-server-name flag is not set when externalServers.tlsServerName is not provided" { + cd `chart_dir` + local command=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'externalServers.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=computer' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].command' | tee /dev/stderr) + + local actual=$(echo $command | jq -r ' . 
| any(contains("-tls-server-name"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/DaemonSet: use-https flag is not set when global.tls.enabled is not provided" { + cd `chart_dir` + local command=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'externalServers.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=computer' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].command' | tee /dev/stderr) + + local actual=$(echo $command | jq -r ' . | any(contains("-use-https"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/DaemonSet: use-https flag is set when global.tls.enabled is provided and externalServers.enabled is true" { + cd `chart_dir` + local command=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'externalServers.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=computer' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].command' | tee /dev/stderr) + + local actual=$(echo $command | jq -r ' . | any(contains("-use-https"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: use-https flag is not set when global.tls.enabled is enabled but externalServers.enabled is false" { + cd `chart_dir` + local command=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'externalServers.enabled=false' \ + --set 'server.enabled=false' \ + --set 'global.tls.enabled=true' \ + --set 'externalServers.hosts[0]=computer' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].command' | tee /dev/stderr) + + local actual=$(echo $command | jq -r ' . | any(contains("-use-https"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/DaemonSet: server-port flag is not set when externalServers.enabled is false" { + cd `chart_dir` + local command=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'externalServers.enabled=false' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=computer' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].command' | tee /dev/stderr) + + local actual=$(echo $command | jq -r ' . | any(contains("-server-port"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/DaemonSet: server-port flag is set when externalServers.enabled is true" { + cd `chart_dir` + local command=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'externalServers.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=computer' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].command' | tee /dev/stderr) + + local actual=$(echo $command | jq -r ' . 
| any(contains("-server-port"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -1872,14 +2269,38 @@ rollingUpdate: local actual=$(echo $object | yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-enterpriselicense.txt"]' | tee /dev/stderr) [ "${actual}" = "path/to/secret" ] - local actual=$(echo $object | - yq -r '.annotations["vault.hashicorp.com/agent-inject-template-enterpriselicense.txt"]' | tee /dev/stderr) + local actual="$(echo $object | yq -r '.annotations["vault.hashicorp.com/agent-inject-template-enterpriselicense.txt"]' | tee /dev/stderr)" local expected=$'{{- with secret \"path/to/secret\" -}}\n{{- .Data.data.enterpriselicense -}}\n{{- end -}}' [ "${actual}" = "${expected}" ] } +@test "client/DaemonSet: vault enterprise license annotations are not set when ent license is set and ACLs are enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclsrole' \ + --set 'global.enterpriseLicense.secretName=path/to/secret' \ + --set 'global.enterpriseLicense.secretKey=enterpriselicense' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=boot' \ + --set 'global.acls.bootstrapToken.secretKey=token' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-enterpriselicense.txt"]' | tee /dev/stderr) + [ "${actual}" = "null" ] + + local actual="$(echo $object | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-enterpriselicense.txt"]' | tee /dev/stderr)" + [ "${actual}" = "null" ] +} + @test "client/DaemonSet: vault CONSUL_LICENSE_PATH is set to /vault/secrets/enterpriselicense.txt" { cd `chart_dir` local env=$(helm template \ @@ -1926,6 +2347,49 @@ rollingUpdate: [ "${actual}" = "" ] } +@test "client/DaemonSet: vault adds consul envvars CONSUL_CACERT on acl-init init container when ACLs are enabled and tls is enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=true' \ + --set 'global.acls.replicationToken.secretName=replication' \ + --set 'global.acls.replicationToken.secretKey=key' \ + --set 'global.tls.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'server.serverCert.secretName=pki_int/issue/test' \ + --set 'global.tls.caCert.secretName=pki_int/cert/ca' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr) + + local actual=$(echo $env | jq -r '. 
| select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/vault/secrets/serverca.crt" ] +} + +@test "client/DaemonSet: Vault does not add consul ca cert volumeMount to acl-init init container when ACLs are enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-daemonset.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'server.serverCert.secretName=pki_int/issue/test' \ + --set 'global.tls.caCert.secretName=pki_int/cert/ca' \ + . | yq '.spec.template.spec.initContainers[0].volumeMounts[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "" ] +} + #-------------------------------------------------------------------- # Vault agent annotations @@ -1937,7 +2401,7 @@ rollingUpdate: --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."consul.hashicorp.com/config-checksum") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role")' | tee /dev/stderr) + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."consul.hashicorp.com/config-checksum") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role") | del(."vault.hashicorp.com/agent-init-first")' | tee /dev/stderr) [ "${actual}" = "{}" ] } @@ -1966,4 +2430,4 @@ rollingUpdate: [ "$status" -eq 1 ] [[ "$output" =~ "global.imageK8s is not a valid key, use global.imageK8S (note the capital 'S')" ]] -} \ No newline at end of file +} diff --git a/charts/consul/test/unit/client-snapshot-agent-deployment.bats b/charts/consul/test/unit/client-snapshot-agent-deployment.bats index 8e345189d7..7a3ed272ae 100644 --- a/charts/consul/test/unit/client-snapshot-agent-deployment.bats +++ b/charts/consul/test/unit/client-snapshot-agent-deployment.bats @@ -39,6 +39,85 @@ load _helpers . } +@test "client/SnapshotAgentDeployment: when client.snapshotAgent.configSecret.secretKey!=null and client.snapshotAgent.configSecret.secretName=null, fail" { + cd `chart_dir` + run helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=' \ + --set 'client.snapshotAgent.configSecret.secretKey=bar' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "client.snapshotAgent.configSecret.secretKey and client.snapshotAgent.configSecret.secretName must both be specified." ]] +} + +@test "client/SnapshotAgentDeployment: when client.snapshotAgent.configSecret.secretName!=null and client.snapshotAgent.configSecret.secretKey=null, fail" { + cd `chart_dir` + run helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=foo' \ + --set 'client.snapshotAgent.configSecret.secretKey=' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "client.snapshotAgent.configSecret.secretKey and client.snapshotAgent.configSecret.secretName must both be specified." 
]] +} + +@test "client/SnapshotAgentDeployment: adds volume for snapshot agent config secret when secret is configured" { + cd `chart_dir` + local vol=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.volumes[] | select(.name == "snapshot-config")' | tee /dev/stderr) + local actual + actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr) + [ "${actual}" = 'snapshot-config' ] + + actual=$(echo $vol | jq -r '. .secret.secretName' | tee /dev/stderr) + [ "${actual}" = 'a/b/c/d' ] + + actual=$(echo $vol | jq -r '. .secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = 'snapshot-agent-config' ] + + actual=$(echo $vol | jq -r '. .secret.items[0].path' | tee /dev/stderr) + [ "${actual}" = 'snapshot-config.json' ] +} + +@test "client/SnapshotAgentDeployment: adds volume mount to snapshot container for snapshot agent config secret when secret is configured" { + cd `chart_dir` + local vol=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "snapshot-config")' | tee /dev/stderr) + local actual + actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr) + [ "${actual}" = 'snapshot-config' ] + + actual=$(echo $vol | jq -r '. .readOnly' | tee /dev/stderr) + [ "${actual}" = 'true' ] + + actual=$(echo $vol | jq -r '. .mountPath' | tee /dev/stderr) + [ "${actual}" = '/consul/config' ] +} + +@test "client/SnapshotAgentDeployment: set config-dir argument on snapshot agent command to volume mount" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2] | contains("-config-dir=/consul/config")' | tee /dev/stderr) + [ "${actual}" = 'true' ] +} + #-------------------------------------------------------------------- # tolerations @@ -88,93 +167,189 @@ load _helpers } #-------------------------------------------------------------------- -# global.acls.manageSystemACLs and snapshotAgent.configSecret +# global.acls.manageSystemACLs -@test "client/SnapshotAgentDeployment: no initContainer by default" { +@test "clientSnapshotAgent/Deployment: consul-logout preStop hook is added when ACLs are enabled" { cd `chart_dir` - local actual=$(helm template \ + local object=$(helm template \ -s templates/client-snapshot-agent-deployment.yaml \ --set 'client.snapshotAgent.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.initContainers' | tee /dev/stderr) - [ "${actual}" = "null" ] + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("/bin/consul logout"))' | tee /dev/stderr) + [ "${object}" = "true" ] } -@test "client/SnapshotAgentDeployment: populates initContainer when global.acls.manageSystemACLs=true" { +@test "clientSnapshotAgent/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { cd `chart_dir` local actual=$(helm template \ -s templates/client-snapshot-agent-deployment.yaml \ --set 'client.snapshotAgent.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.initContainers | length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq '[.spec.template.spec.containers[0].env[1].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] } -@test "client/SnapshotAgentDeployment: no volumes by default" { +@test "clientSnapshotAgent/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/client-snapshot-agent-deployment.yaml \ --set 'client.snapshotAgent.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.volumes' | tee /dev/stderr) - [ "${actual}" = "null" ] + yq '[.spec.template.spec.containers[0].env[2].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "client/SnapshotAgentDeployment: populates volumes when global.acls.manageSystemACLs=true" { +@test "clientSnapshotAgent/Deployment: init container is created when global.acls.manageSystemACLs=true" { cd `chart_dir` - local actual=$(helm template \ + local object=$(helm template \ -s templates/client-snapshot-agent-deployment.yaml \ --set 'client.snapshotAgent.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.volumes | length > 0' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "snapshot-agent-acl-init" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr) + echo $actual [ "${actual}" = "true" ] } -@test "client/SnapshotAgentDeployment: populates volumes when client.snapshotAgent.configSecret.secretName and client.snapshotAgent.configSecret secretKey are defined" { +@test "clientSnapshotAgent/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { cd `chart_dir` - local actual=$(helm template \ + local object=$(helm template \ -s templates/client-snapshot-agent-deployment.yaml \ --set 'client.snapshotAgent.enabled=true' \ - --set 'client.snapshotAgent.configSecret.secretName=secret' \ - --set 'client.snapshotAgent.configSecret.secretKey=key' \ + --set 'global.tls.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.volumes | length > 0' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[] | select(.name == "snapshot-agent-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "client/SnapshotAgentDeployment: no container volumeMounts by default" { +@test "clientSnapshotAgent/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { cd `chart_dir` - local actual=$(helm template \ + local object=$(helm template \ -s templates/client-snapshot-agent-deployment.yaml \ --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].volumeMounts' | tee /dev/stderr) - [ "${actual}" = "null" ] + yq '.spec.template.spec.initContainers[] | select(.name == "snapshot-agent-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "client/SnapshotAgentDeployment: populates container volumeMounts when global.acls.manageSystemACLs=true" { +@test "clientSnapshotAgent/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { cd `chart_dir` - local actual=$(helm template \ + local object=$(helm template \ -s templates/client-snapshot-agent-deployment.yaml \ --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].volumeMounts | length > 0' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[] | select(.name == "snapshot-agent-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "client/SnapshotAgentDeployment: populates container volumeMounts when client.snapshotAgent.configSecret.secretName and client.snapshotAgent.configSecret secretKey are defined" { +@test "clientSnapshotAgent/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { cd `chart_dir` - local actual=$(helm template \ + local object=$(helm template \ -s templates/client-snapshot-agent-deployment.yaml \ --set 'client.snapshotAgent.enabled=true' \ - --set 'client.snapshotAgent.configSecret.secretName=secret' \ - --set 'client.snapshotAgent.configSecret.secretKey=key' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].volumeMounts | length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "get-auto-encrypt-client-ca" ] } #-------------------------------------------------------------------- @@ -370,15 +545,9 @@ load _helpers --set 'client.snapshotAgent.caCert=-----BEGIN CERTIFICATE----- MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].command[2]' | tee /dev/stderr) - - exp='cat < /etc/ssl/certs/custom-ca.pem ------BEGIN CERTIFICATE----- -MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL -EOF -exec /bin/consul snapshot agent \' + yq -r '.spec.template.spec.containers[0].command[2] | contains("MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL")' | tee /dev/stderr) - [ "${actual}" = "${exp}" ] + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -656,6 +825,78 @@ exec /bin/consul snapshot agent \' [ "${actual}" = "" ] } +@test "client/SnapshotAgentDeployment: vault snapshot agent config annotations are correct when enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulSnapshotAgentRole=bar' \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=path/to/secret' \ + --set 'client.snapshotAgent.configSecret.secretKey=config' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-snapshot-agent-config.json"]' | tee /dev/stderr) + [ "${actual}" = "path/to/secret" ] + + actual=$(echo $object | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-snapshot-agent-config.json"]' | tee /dev/stderr) + local expected=$'{{- with secret \"path/to/secret\" -}}\n{{- .Data.data.config -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + actual=$(echo $object | jq -r '.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "client/SnapshotAgentDeployment: vault does not add volume for snapshot agent config secret" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.volumes[] | select(.name == "snapshot-config")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: vault does not add volume mount for snapshot agent config secret" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . 
| tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "snapshot-config")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: vault sets config-file argument on snapshot agent command to config downloaded by vault agent injector" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2] | contains("-config-file=/vault/secrets/snapshot-agent-config.json")' | tee /dev/stderr) + [ "${actual}" = 'true' ] +} + #-------------------------------------------------------------------- # Vault agent annotations @@ -690,4 +931,67 @@ exec /bin/consul snapshot agent \' . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) [ "${actual}" = "bar" ] -} \ No newline at end of file +} + + +@test "client/SnapshotAgentDeployment: vault properly sets vault role when global.secretsBackend.vault.consulCARole is set but global.secretsBackend.vault.consulSnapshotAgentRole is not set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=ca-role' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "ca-role" ] +} + +@test "client/SnapshotAgentDeployment: vault properly sets vault role when global.secretsBackend.vault.consulSnapshotAgentRole is set but global.secretsBackend.vault.consulCARole is not set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulSnapshotAgentRole=sa-role' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "sa-role" ] +} + +@test "client/SnapshotAgentDeployment: vault properly sets vault role to global.secretsBackend.vault.consulSnapshotAgentRole value when both global.secretsBackend.vault.consulSnapshotAgentRole and global.secretsBackend.vault.consulCARole are set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulSnapshotAgentRole=sa-role' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + --set 'global.secretsBackend.vault.consulCARole=ca-role' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "sa-role" ] +} diff --git a/charts/consul/test/unit/client-snapshot-agent-role.bats b/charts/consul/test/unit/client-snapshot-agent-role.bats index 8408010e80..86aaaf3880 100644 --- a/charts/consul/test/unit/client-snapshot-agent-role.bats +++ b/charts/consul/test/unit/client-snapshot-agent-role.bats @@ -53,31 +53,3 @@ load _helpers yq -r '.rules[0].resources[0]' | tee /dev/stderr) [ "${actual}" = "podsecuritypolicies" ] } - -#-------------------------------------------------------------------- -# global.acls.manageSystemACLs - -@test "client/SnapshotAgentRole: allows secret access with global.bootsrapACLs=true" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/client-snapshot-agent-role.yaml \ - --set 'client.snapshotAgent.enabled=true' \ - --set 'client.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq -r '.rules[0].resources[0]' | tee /dev/stderr) - [ "${actual}" = "secrets" ] -} - -@test "client/SnapshotAgentRole: allows secret access with global.bootsrapACLs=true and global.enablePodSecurityPolicies=true" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/client-snapshot-agent-role.yaml \ - --set 'client.enabled=true' \ - --set 'client.snapshotAgent.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.enablePodSecurityPolicies=true' \ - . | tee /dev/stderr | - yq -r '.rules[1].resources[0]' | tee /dev/stderr) - [ "${actual}" = "secrets" ] -} diff --git a/charts/consul/test/unit/connect-inject-authmethod-clusterrolebinding.bats b/charts/consul/test/unit/connect-inject-authmethod-clusterrolebinding.bats deleted file mode 100644 index 7a70293f16..0000000000 --- a/charts/consul/test/unit/connect-inject-authmethod-clusterrolebinding.bats +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "connectInjectAuthMethod/ClusterRoleBinding: disabled by default" { - cd `chart_dir` - assert_empty helm template \ - -s templates/connect-inject-authmethod-clusterrolebinding.yaml \ - . 
-}
-
-@test "connectInjectAuthMethod/ClusterRoleBinding: enabled with global.enabled false" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/connect-inject-authmethod-clusterrolebinding.yaml \
-      --set 'global.enabled=false' \
-      --set 'client.enabled=true' \
-      --set 'connectInject.enabled=true' \
-      --set 'global.acls.manageSystemACLs=true' \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "connectInjectAuthMethod/ClusterRoleBinding: disabled with connectInject.enabled" {
-  cd `chart_dir`
-  assert_empty helm template \
-      -s templates/connect-inject-authmethod-clusterrolebinding.yaml \
-      --set 'connectInject.enabled=true' \
-      .
-}
-
-@test "connectInjectAuthMethod/ClusterRoleBinding: enabled with global.acls.manageSystemACLs.enabled=true" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/connect-inject-authmethod-clusterrolebinding.yaml \
-      --set 'connectInject.enabled=true' \
-      --set 'global.acls.manageSystemACLs=true' \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
diff --git a/charts/consul/test/unit/connect-inject-clusterrole.bats b/charts/consul/test/unit/connect-inject-clusterrole.bats
index db6649b33c..e954b8908a 100644
--- a/charts/consul/test/unit/connect-inject-clusterrole.bats
+++ b/charts/consul/test/unit/connect-inject-clusterrole.bats
@@ -53,17 +53,3 @@ load _helpers
       yq -r '.rules | map(select(.resources[0] == "podsecuritypolicies")) | length' | tee /dev/stderr)
   [ "${actual}" = "1" ]
 }
-
-#--------------------------------------------------------------------
-# global.acls.manageSystemACLs
-
-@test "connectInject/ClusterRole: secret access with global.acls.manageSystemACLs=true" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/connect-inject-clusterrole.yaml \
-      --set 'connectInject.enabled=true' \
-      --set 'global.acls.manageSystemACLs=true' \
-      . | tee /dev/stderr |
-      yq -r '.rules | map(select(.resources[0] == "secrets")) | length' | tee /dev/stderr)
-  [ "${actual}" = "1" ]
-}
diff --git a/charts/consul/test/unit/connect-inject-clusterrolebinding.bats b/charts/consul/test/unit/connect-inject-clusterrolebinding.bats
index 1f6fc94a88..ccf30083f9 100644
--- a/charts/consul/test/unit/connect-inject-clusterrolebinding.bats
+++ b/charts/consul/test/unit/connect-inject-clusterrolebinding.bats
@@ -27,4 +27,4 @@ load _helpers
       -s templates/connect-inject-clusterrolebinding.yaml \
       --set 'connectInject.enabled=false' \
       .
-}
+}
\ No newline at end of file
diff --git a/charts/consul/test/unit/connect-inject-deployment.bats b/charts/consul/test/unit/connect-inject-deployment.bats
index a3da403005..9ea21184ec 100755
--- a/charts/consul/test/unit/connect-inject-deployment.bats
+++ b/charts/consul/test/unit/connect-inject-deployment.bats
@@ -926,40 +926,188 @@ EOF
 #--------------------------------------------------------------------
 # global.acls.manageSystemACLs
 
-@test "connectInject/Deployment: CONSUL_HTTP_TOKEN env variable created when global.acls.manageSystemACLs=true" {
+@test "connectInject/Deployment: consul-logout preStop hook is added when ACLs are enabled" {
   cd `chart_dir`
   local object=$(helm template \
       -s templates/connect-inject-deployment.yaml \
       --set 'connectInject.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . 
| tee /dev/stderr | - yq '[.spec.template.spec.containers[0].env[].name] ' | tee /dev/stderr) + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul-k8s-control-plane consul-logout"))' | tee /dev/stderr) + + [ "${object}" = "true" ] +} + +@test "connectInject/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[1].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls disabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) local actual=$(echo $object | - yq 'any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "connect-injector-acl-init" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq 'map(select(test("CONSUL_HTTP_TOKEN"))) | length' | tee /dev/stderr) - [ "${actual}" = "1" ] + yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] } -@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true" { +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { cd `chart_dir` local object=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr) local actual=$(echo $object | - yq -r '.name' | tee /dev/stderr) - [ "${actual}" = "injector-acl-init" ] + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr) local actual=$(echo $object | yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "get-auto-encrypt-client-ca" ] } @test "connectInject/Deployment: cross namespace policy is not added when global.acls.manageSystemACLs=false" { @@ -985,6 +1133,61 @@ EOF [ "${actual}" = "true" ] } +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when in non-primary datacenter with Consul Namespaces disabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.datacenter=dc2' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when in non-primary datacenter with Consul Namespaces enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.datacenter=dc2' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # resources diff --git a/charts/consul/test/unit/connect-inject-serviceaccount.bats b/charts/consul/test/unit/connect-inject-serviceaccount.bats index 464a838b07..07b38c3d49 100644 --- a/charts/consul/test/unit/connect-inject-serviceaccount.bats +++ b/charts/consul/test/unit/connect-inject-serviceaccount.bats @@ -28,6 +28,7 @@ load _helpers --set 'connectInject.enabled=false' \ . } + #-------------------------------------------------------------------- # global.imagePullSecrets diff --git a/charts/consul/test/unit/controller-clusterrole.bats b/charts/consul/test/unit/controller-clusterrole.bats index ed64f9050a..dc0b560e1d 100644 --- a/charts/consul/test/unit/controller-clusterrole.bats +++ b/charts/consul/test/unit/controller-clusterrole.bats @@ -43,17 +43,3 @@ load _helpers yq '.rules | map(select(.resources[0] == "podsecuritypolicies")) | length' | tee /dev/stderr) [ "${actual}" = "1" ] } - -#-------------------------------------------------------------------- -# global.acls.manageSystemACLs - -@test "controller/ClusterRole: allows secret access with global.acls.manageSystemACLs=true" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/controller-clusterrole.yaml \ - --set 'controller.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . 
| tee /dev/stderr |
-      yq -r '.rules | map(select(.resourceNames[0] == "RELEASE-NAME-consul-controller-acl-token")) | length' | tee /dev/stderr)
-  [ "${actual}" = "1" ]
-}
diff --git a/charts/consul/test/unit/controller-deployment.bats b/charts/consul/test/unit/controller-deployment.bats
index 248811867d..c9401c4d19 100644
--- a/charts/consul/test/unit/controller-deployment.bats
+++ b/charts/consul/test/unit/controller-deployment.bats
@@ -46,18 +46,39 @@ load _helpers
 #--------------------------------------------------------------------
 # global.acls.manageSystemACLs
 
-@test "controller/Deployment: CONSUL_HTTP_TOKEN env variable created when global.acls.manageSystemACLs=true" {
+@test "controller/Deployment: consul-logout preStop hook is added when ACLs are enabled" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/controller-deployment.yaml \
       --set 'controller.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . | tee /dev/stderr |
-      yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr)
+      yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul-k8s-control-plane consul-logout"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
 
-@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true" {
+@test "controller/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
+@test "controller/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls disabled" {
   cd `chart_dir`
   local object=$(helm template \
       -s templates/controller-deployment.yaml \
@@ -73,6 +94,168 @@ load _helpers
   local actual=$(echo $object |
       yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr)
+  echo $actual
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "get-auto-encrypt-client-ca" ] +} + +@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when federation enabled in non-primary datacenter" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.datacenter=dc2' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -486,38 +669,37 @@ load _helpers #-------------------------------------------------------------------- # aclToken -@test "controller/Deployment: aclToken disabled when secretName is missing" { +@test "controller/Deployment: aclToken enabled when secretName and secretKey is provided" { cd `chart_dir` local actual=$(helm template \ -s templates/controller-deployment.yaml \ --set 'controller.enabled=true' \ + --set 'controller.aclToken.secretName=foo' \ --set 'controller.aclToken.secretKey=bar' \ . 
| tee /dev/stderr | yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) - [ "${actual}" = "false" ] + [ "${actual}" = "true" ] } -@test "controller/Deployment: aclToken disabled when secretKey is missing" { +@test "controller/Deployment: aclToken env is set when ACLs are enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/controller-deployment.yaml \ --set 'controller.enabled=true' \ - --set 'controller.aclToken.secretName=foo' \ + --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) - [ "${actual}" = "false" ] + [ "${actual}" = "true" ] } -@test "controller/Deployment: aclToken enabled when secretName and secretKey is provided" { +@test "controller/Deployment: aclToken env is not set when ACLs are disabled" { cd `chart_dir` local actual=$(helm template \ -s templates/controller-deployment.yaml \ --set 'controller.enabled=true' \ - --set 'controller.aclToken.secretName=foo' \ - --set 'controller.aclToken.secretKey=bar' \ . | tee /dev/stderr | yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] } #-------------------------------------------------------------------- @@ -528,11 +710,16 @@ load _helpers local cmd=$(helm template \ -s templates/controller-deployment.yaml \ --set 'controller.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + yq '.spec.template.spec' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq '.containers[0].command | any(contains("-log-level=info"))' | tee /dev/stderr) + [ "${actual}" = "true" ] local actual=$(echo "$cmd" | - yq 'any(contains("-log-level=info"))' | tee /dev/stderr) + yq '.initContainers[0].command | any(contains("-log-level=info"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -542,11 +729,16 @@ load _helpers -s templates/controller-deployment.yaml \ --set 'controller.enabled=true' \ --set 'controller.logLevel=error' \ + --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + yq '.spec.template.spec' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq '.containers[0].command | any(contains("-log-level=error"))' | tee /dev/stderr) + [ "${actual}" = "true" ] local actual=$(echo "$cmd" | - yq 'any(contains("-log-level=error"))' | tee /dev/stderr) + yq '.initContainers[0].command | any(contains("-log-level=error"))' | tee /dev/stderr) [ "${actual}" = "true" ] } diff --git a/charts/consul/test/unit/helpers.bats b/charts/consul/test/unit/helpers.bats index ee524a6842..181df6a34c 100644 --- a/charts/consul/test/unit/helpers.bats +++ b/charts/consul/test/unit/helpers.bats @@ -139,7 +139,7 @@ load _helpers # consul.getAutoEncryptClientCA helper since we need an existing template that calls # the consul.getAutoEncryptClientCA helper. 
-@test "helper/consul.getAutoEncryptClientCA: get-auto-encrypt-client-ca uses server's stateful set address by default" { +@test "helper/consul.getAutoEncryptClientCA: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { cd `chart_dir` local command=$(helm template \ -s templates/tests/test-runner.yaml \ @@ -217,10 +217,6 @@ load _helpers # check the default server port is 443 if not provided actual=$(echo $command | jq ' . | contains("-server-port=443")') [ "${actual}" = "true" ] - - # check server's CA cert - actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') - [ "${actual}" = "true" ] } @test "helper/consul.getAutoEncryptClientCA: can pass cloud auto-join string to server address via externalServers.hosts" { @@ -285,7 +281,29 @@ load _helpers [ "${actual}" = "" ] } -@test "helper/consul.getAutoEncryptClientCA: uses the correct -ca-file when vault is enabled" { +@test "helper/consul.getAutoEncryptClientCA: uses the correct -ca-file when vault is enabled and external servers disabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/tests/test-runner.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'server.serverCert.secretName=pki_int/issue/test' \ + --set 'global.tls.caCert.secretName=pki_int/ca/pem' \ + . | tee /dev/stderr | + yq '.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca")' | tee /dev/stderr) + + actual=$(echo $object | jq '.command | join(" ") | contains("-ca-file=/vault/secrets/serverca.crt")') + [ "${actual}" = "true" ] + + actual=$(echo $object | jq '.volumeMounts[] | select(.name == "consul-ca-cert")') + [ "${actual}" = "" ] +} + +@test "helper/consul.getAutoEncryptClientCA: uses the correct -ca-file when vault and external servers is enabled" { cd `chart_dir` local object=$(helm template \ -s templates/tests/test-runner.yaml \ @@ -298,6 +316,8 @@ load _helpers --set 'server.serverCert.secretName=pki_int/issue/test' \ --set 'global.tls.caCert.secretName=pki_int/ca/pem' \ --set 'server.enabled=false' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=consul.io' \ . | tee /dev/stderr | yq '.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca")' | tee /dev/stderr) diff --git a/charts/consul/test/unit/ingress-gateways-deployment.bats b/charts/consul/test/unit/ingress-gateways-deployment.bats index 76b20dd501..ebe59d9489 100644 --- a/charts/consul/test/unit/ingress-gateways-deployment.bats +++ b/charts/consul/test/unit/ingress-gateways-deployment.bats @@ -25,6 +25,41 @@ load _helpers [ "${actual}" = "RELEASE-NAME-consul-ingress-gateway" ] } +@test "ingressGateways/Deployment: serviceAccountName is set properly" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'ingress.defaults.consulNamespace=namespace' \ + . 
| tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.serviceAccountName' | tee /dev/stderr) + + [ "${actual}" = "RELEASE-NAME-consul-ingress-gateway" ] +} + +@test "ingressGateways/Deployment: Adds consul service volumeMount to gateway container" { + cd `chart_dir` + local object=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | yq '.spec.template.spec.containers[0].volumeMounts[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "consul-service" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "/consul/service" ] + + local actual=$(echo $object | + yq -r '.readOnly' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # prerequisites @@ -72,6 +107,36 @@ load _helpers [[ "$output" =~ "clients must be enabled" ]] } +@test "ingressGateways/Deployment: fails if there are duplicate gateway names" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'ingressGateways.gateways[0].name=foo' \ + --set 'ingressGateways.gateways[1].name=foo' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=true' \ + --set 'client.enabled=true' . + echo "status: $output" + [ "$status" -eq 1 ] + [[ "$output" =~ "ingress gateways must have unique names but found duplicate name foo" ]] +} + +@test "ingressGateways/Deployment: fails if a terminating gateway has the same name as an ingress gateway" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'ingressGateways.enabled=true' \ + --set 'terminatingGateways.gateways[0].name=foo' \ + --set 'ingressGateways.gateways[0].name=foo' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=true' \ + --set 'client.enabled=true' . + echo "status: $output" + [ "$status" -eq 1 ] + [[ "$output" =~ "terminating gateways cannot have duplicate names of any ingress gateways" ]] +} #-------------------------------------------------------------------- # envoyImage @@ -234,7 +299,7 @@ load _helpers #-------------------------------------------------------------------- # global.acls.manageSystemACLs -@test "ingressGateways/Deployment: CONSUL_HTTP_TOKEN env variable created when global.acls.manageSystemACLs=true" { +@test "ingressGateways/Deployment: consul-sidecar uses -token-file flag when global.acls.manageSystemACLs=true" { cd `chart_dir` local actual=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ @@ -242,19 +307,103 @@ load _helpers --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq -s '[.[0].spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + yq -s '[.[0].spec.template.spec.containers[1].command[6]] | any(contains("-token-file=/consul/service/acl-token"))' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "ingressGateways/Deployment: consul-sidecar uses -token-file flag when global.acls.manageSystemACLs=true" { +@test "ingressGateways/Deployment: Adds consul envvars CONSUL_HTTP_ADDR on ingress-gateway-init init container when ACLs are enabled and tls is enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "https://\$(HOST_IP):8501" ] +} + +@test "ingressGateways/Deployment: Adds consul envvars CONSUL_HTTP_ADDR on ingress-gateway-init init container when ACLs are enabled and tls is not enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "http://\$(HOST_IP):8500" ] +} + +@test "ingressGateways/Deployment: Does not add consul envvars CONSUL_CACERT on ingress-gateway-init init container when ACLs are enabled and tls is not enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ + --set 'connectInject.enabled=true' \ --set 'ingressGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1].env[] | select(.name == "CONSUL_CACERT")' | tee /dev/stderr) + + [ "${actual}" = "" ] +} + +@test "ingressGateways/Deployment: Adds consul envvars CONSUL_CACERT on ingress-gateway-init init container when ACLs are enabled and tls is enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ --set 'connectInject.enabled=true' \ + --set 'ingressGateways.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq -s '.[0].spec.template.spec.containers[1].command | any(contains("-token-file=/consul/service/acl-token"))' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr) + + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + +@test "ingressGateways/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=false' \ + . 
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "ingressGateways/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -s '[.[0].spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: consul-logout preStop hook is added when ACLs are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[3]] | any(contains("/consul-bin/consul logout"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -954,7 +1103,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -968,7 +1117,7 @@ key2: value2' \ --set 'ingressGateways.gateways[0].name=ingress-gateway' \ --set 'ingressGateways.gateways[0].service.type=ClusterIP' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -980,7 +1129,7 @@ key2: value2' \ --set 'connectInject.enabled=true' \ --set 'ingressGateways.defaults.service.type=LoadBalancer' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -993,7 +1142,7 @@ key2: value2' \ --set 'ingressGateways.gateways[0].name=ingress-gateway' \ --set 'ingressGateways.gateways[0].service.type=LoadBalancer' \ . 
| tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1006,7 +1155,7 @@ key2: value2' \ --set 'ingressGateways.defaults.service.type=NodePort' \ --set 'ingressGateways.defaults.service.ports[0].nodePort=1234' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("WAN_ADDR=\"${HOST_IP}\"")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"${HOST_IP}\"")' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1020,7 +1169,7 @@ key2: value2' \ --set 'ingressGateways.gateways[0].service.type=NodePort' \ --set 'ingressGateways.gateways[0].service.ports[0].nodePort=1234' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("WAN_ADDR=\"${HOST_IP}\"")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"${HOST_IP}\"")' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1061,7 +1210,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("WAN_PORT=80")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=80")' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1073,7 +1222,7 @@ key2: value2' \ --set 'connectInject.enabled=true' \ --set 'ingressGateways.defaults.service.ports[0].port=1234' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1086,7 +1235,7 @@ key2: value2' \ --set 'ingressGateways.gateways[0].name=ingress-gateway' \ --set 'ingressGateways.gateways[0].service.ports[0].port=1234' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1099,7 +1248,7 @@ key2: value2' \ --set 'ingressGateways.defaults.service.type=NodePort' \ --set 'ingressGateways.defaults.service.ports[0].nodePort=1234' \ . 
| tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1114,7 +1263,7 @@ key2: value2' \ --set 'ingressGateways.gateways[0].service.type=NodePort' \ --set 'ingressGateways.gateways[0].service.ports[0].nodePort=1234' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1161,16 +1310,16 @@ key2: value2' \ } #-------------------------------------------------------------------- -# service-init init container +# ingress-gateway-init init container -@test "ingressGateways/Deployment: service-init init container defaults" { +@test "ingressGateways/Deployment: ingress-gateway-init init container defaults" { cd `chart_dir` local actual=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='consul-k8s-control-plane service-address \ -log-level=info \ @@ -1225,7 +1374,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "ingressGateways/Deployment: service-init init container with acls.manageSystemACLs=true" { +@test "ingressGateways/Deployment: ingress-gateway-init init container with acls.manageSystemACLs=true" { cd `chart_dir` local actual=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ @@ -1233,12 +1382,14 @@ EOF --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='consul-k8s-control-plane acl-init \ - -secret-name="RELEASE-NAME-consul-ingress-gateway-ingress-gateway-acl-token" \ - -k8s-namespace=default \ - -token-sink-file=/consul/service/acl-token + -component-name=ingress-gateway/RELEASE-NAME-consul-ingress-gateway \ + -acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method \ + -token-sink-file=/consul/service/acl-token \ + -log-level=info \ + -log-json=false consul-k8s-control-plane service-address \ -log-level=info \ @@ -1294,7 +1445,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "ingressGateways/Deployment: service-init init container includes service-address command for LoadBalancer set through defaults" { +@test "ingressGateways/Deployment: ingress-gateway-init init container includes service-address command for LoadBalancer set through defaults" { cd `chart_dir` local actual=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ @@ -1302,11 +1453,11 @@ EOF --set 'connectInject.enabled=true' \ --set 'ingressGateways.defaults.service.type=LoadBalancer' \ . 
| tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "ingressGateways/Deployment: service-init init container includes service-address command for LoadBalancer set through specific gateway overriding defaults" { +@test "ingressGateways/Deployment: ingress-gateway-init init container includes service-address command for LoadBalancer set through specific gateway overriding defaults" { cd `chart_dir` local actual=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ @@ -1315,11 +1466,11 @@ EOF --set 'ingressGateways.gateways[0].name=ingress-gateway' \ --set 'ingressGateways.gateways[0].service.type=LoadBalancer' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "ingressGateways/Deployment: service-init init container does not include service-address command for NodePort set through defaults" { +@test "ingressGateways/Deployment: ingress-gateway-init init container does not include service-address command for NodePort set through defaults" { cd `chart_dir` local actual=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ @@ -1329,11 +1480,11 @@ EOF --set 'ingressGateways.defaults.service.ports[0].port=80' \ --set 'ingressGateways.defaults.service.ports[0].nodePort=1234' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr) [ "${actual}" = "false" ] } -@test "ingressGateways/Deployment: service-init init container does not include service-address command for NodePort set through specific gateway overriding defaults" { +@test "ingressGateways/Deployment: ingress-gateway-init init container does not include service-address command for NodePort set through specific gateway overriding defaults" { cd `chart_dir` local actual=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ @@ -1344,7 +1495,7 @@ EOF --set 'ingressGateways.gateways[0].service.ports[0].port=80' \ --set 'ingressGateways.gateways[0].service.ports[0].nodePort=1234' \ . 
| tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr) [ "${actual}" = "false" ] } diff --git a/charts/consul/test/unit/ingress-gateways-role.bats b/charts/consul/test/unit/ingress-gateways-role.bats index 431c889f50..d592820b8b 100644 --- a/charts/consul/test/unit/ingress-gateways-role.bats +++ b/charts/consul/test/unit/ingress-gateways-role.bats @@ -46,7 +46,7 @@ load _helpers [ "${actual}" = "secrets" ] local actual=$(echo $object | yq -r '.resourceNames[0]' | tee /dev/stderr) - [ "${actual}" = "RELEASE-NAME-consul-ingress-gateway-ingress-gateway-acl-token" ] + [ "${actual}" = "RELEASE-NAME-consul-ingress-gateway-acl-token" ] } @test "ingressGateways/Role: rules for ingressGateways service" { diff --git a/charts/consul/test/unit/mesh-gateway-clusterrole.bats b/charts/consul/test/unit/mesh-gateway-clusterrole.bats index fa9c39b923..da4d0bdb2c 100644 --- a/charts/consul/test/unit/mesh-gateway-clusterrole.bats +++ b/charts/consul/test/unit/mesh-gateway-clusterrole.bats @@ -32,18 +32,6 @@ load _helpers [ "${actual}" = "podsecuritypolicies" ] } -@test "meshGateway/ClusterRole: rules for global.acls.manageSystemACLs=true" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/mesh-gateway-clusterrole.yaml \ - --set 'meshGateway.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq -r '.rules[0].resources[0]' | tee /dev/stderr) - [ "${actual}" = "secrets" ] -} - @test "meshGateway/ClusterRole: rules for meshGateway.wanAddress.source=Service" { cd `chart_dir` local actual=$(helm template \ @@ -83,5 +71,5 @@ load _helpers --set 'meshGateway.wanAddress.source=Service' \ . 
| tee /dev/stderr | yq -r '.rules | length' | tee /dev/stderr) - [ "${actual}" = "3" ] + [ "${actual}" = "2" ] } diff --git a/charts/consul/test/unit/mesh-gateway-deployment.bats b/charts/consul/test/unit/mesh-gateway-deployment.bats index e8a2b1eeec..d4adca93b7 100755 --- a/charts/consul/test/unit/mesh-gateway-deployment.bats +++ b/charts/consul/test/unit/mesh-gateway-deployment.bats @@ -438,9 +438,9 @@ key2: value2' \ } #-------------------------------------------------------------------- -# service-init container resources +# mesh-gateway-init container resources -@test "meshGateway/Deployment: init service-init container has default resources" { +@test "meshGateway/Deployment: init mesh-gateway-init container has default resources" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -455,7 +455,7 @@ key2: value2' \ [ $(echo "${actual}" | yq -r '.limits.cpu') = "50m" ] } -@test "meshGateway/Deployment: init service-init container resources can be set" { +@test "meshGateway/Deployment: init mesh-gateway-init container resources can be set" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -584,31 +584,228 @@ key2: value2' \ [[ "$output" =~ "if global.acls.manageSystemACLs is true, meshGateway.consulServiceName cannot be set" ]] } -@test "meshGateway/Deployment: does not fail if consulServiceName is set to mesh-gateway and acls.manageSystemACLs is true" { +#-------------------------------------------------------------------- +# manageSystemACLs + +@test "meshGateway/Deployment: consul-logout preStop hook is added when ACLs are enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'meshGateway.consulServiceName=mesh-gateway' \ --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr \ - | yq '.spec.template.spec.containers[0]' | tee /dev/stderr ) + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[3]] | any(contains("/consul-bin/consul logout"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} - [[ $(echo "${actual}" | yq -r '.lifecycle.preStop.exec.command' ) =~ '-id=\"mesh-gateway\"' ]] +@test "meshGateway/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] } -@test "meshGateway/Deployment: consulServiceName can be set" { +@test "meshGateway/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'meshGateway.consulServiceName=overridden' \ - . | tee /dev/stderr \ - | yq '.spec.template.spec.containers[0]' | tee /dev/stderr ) + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[2].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls disabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "mesh-gateway-init" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] +} + +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "mesh-gateway-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[3].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] - [[ $(echo "${actual}" | yq -r '.lifecycle.preStop.exec.command' ) =~ '-id=\"overridden\"' ]] + local actual=$(echo $object | + yq '[.env[3].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[2] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "mesh-gateway-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[3].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[3].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[2] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "mesh-gateway-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[3].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[3].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[2] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "meshGateway/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "get-auto-encrypt-client-ca" ] +} + +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when federation enabled in non-primary datacenter" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.datacenter=dc2' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "mesh-gateway-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -848,16 +1045,16 @@ key2: value2' \ } ##-------------------------------------------------------------------- -## service-init init container +## mesh-gateway-init init container -@test "meshGateway/Deployment: service-init init container" { +@test "meshGateway/Deployment: mesh-gateway-init init container" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='consul-k8s-control-plane service-address \ -log-level=info \ @@ -901,7 +1098,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "meshGateway/Deployment: service-init init container with acls.manageSystemACLs=true" { +@test "meshGateway/Deployment: mesh-gateway-init init container with acls.manageSystemACLs=true" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -909,12 +1106,14 @@ EOF --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq -r '.spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='consul-k8s-control-plane acl-init \ - -secret-name="RELEASE-NAME-consul-mesh-gateway-acl-token" \ - -k8s-namespace=default \ - -token-sink-file=/consul/service/acl-token + -component-name=mesh-gateway \ + -token-sink-file=/consul/service/acl-token \ + -acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method \ + -log-level=info \ + -log-json=false consul-k8s-control-plane service-address \ -log-level=info \ @@ -959,7 +1158,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "meshGateway/Deployment: service-init init container with global.federation.enabled=true" { +@test "meshGateway/Deployment: mesh-gateway-init init container with global.federation.enabled=true" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -968,7 +1167,7 @@ EOF --set 'global.federation.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='consul-k8s-control-plane service-address \ -log-level=info \ @@ -1015,7 +1214,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "meshGateway/Deployment: service-init init container containerPort and wanAddress.port can be changed" { +@test "meshGateway/Deployment: mesh-gateway-init init container containerPort and wanAddress.port can be changed" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -1025,7 +1224,7 @@ EOF --set 'meshGateway.wanAddress.source=NodeIP' \ --set 'meshGateway.wanAddress.port=9999' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='WAN_ADDR="${HOST_IP}" WAN_PORT="9999" @@ -1063,7 +1262,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "meshGateway/Deployment: service-init init container wanAddress.source=NodeIP" { +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=NodeIP" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -1071,7 +1270,7 @@ EOF --set 'connectInject.enabled=true' \ --set 'meshGateway.wanAddress.source=NodeIP' \ . 
| tee /dev/stderr | - yq -r '.spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='WAN_ADDR="${HOST_IP}" WAN_PORT="443" @@ -1109,7 +1308,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "meshGateway/Deployment: service-init init container wanAddress.source=NodeName" { +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=NodeName" { cd `chart_dir` local obj=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -1123,7 +1322,7 @@ EOF [ "${actual}" = "true" ] local actual=$(echo "$obj" | - yq -r '.spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='WAN_ADDR="${NODE_NAME}" WAN_PORT="443" @@ -1161,7 +1360,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "meshGateway/Deployment: service-init init container wanAddress.source=Static fails if wanAddress.static is empty" { +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Static fails if wanAddress.static is empty" { cd `chart_dir` run helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -1175,7 +1374,7 @@ EOF [[ "$output" =~ "if meshGateway.wanAddress.source=Static then meshGateway.wanAddress.static cannot be empty" ]] } -@test "meshGateway/Deployment: service-init init container wanAddress.source=Static" { +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Static" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -1184,7 +1383,7 @@ EOF --set 'meshGateway.wanAddress.source=Static' \ --set 'meshGateway.wanAddress.static=example.com' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='WAN_ADDR="example.com" WAN_PORT="443" @@ -1222,7 +1421,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "meshGateway/Deployment: service-init init container wanAddress.source=Service fails if service.enable is false" { +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service fails if service.enable is false" { cd `chart_dir` run helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -1236,7 +1435,7 @@ EOF [[ "$output" =~ "if meshGateway.wanAddress.source=Service then meshGateway.service.enabled must be set to true" ]] } -@test "meshGateway/Deployment: service-init init container wanAddress.source=Service, type=LoadBalancer" { +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service, type=LoadBalancer" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -1247,7 +1446,7 @@ EOF --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.type=LoadBalancer' \ . 
| tee /dev/stderr | - yq -r '.spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='consul-k8s-control-plane service-address \ -log-level=info \ @@ -1291,7 +1490,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "meshGateway/Deployment: service-init init container wanAddress.source=Service, type=NodePort" { +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service, type=NodePort" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -1302,7 +1501,7 @@ EOF --set 'meshGateway.service.nodePort=9999' \ --set 'meshGateway.service.type=NodePort' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='WAN_ADDR="${HOST_IP}" WAN_PORT="9999" @@ -1340,7 +1539,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "meshGateway/Deployment: service-init init container wanAddress.source=Service, type=NodePort fails if service.nodePort is null" { +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service, type=NodePort fails if service.nodePort is null" { cd `chart_dir` run helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -1355,7 +1554,7 @@ EOF [[ "$output" =~ "if meshGateway.wanAddress.source=Service and meshGateway.service.type=NodePort, meshGateway.service.nodePort must be set" ]] } -@test "meshGateway/Deployment: service-init init container wanAddress.source=Service, type=ClusterIP" { +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service, type=ClusterIP" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -1366,7 +1565,7 @@ EOF --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.type=ClusterIP' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='consul-k8s-control-plane service-address \ -log-level=info \ @@ -1410,7 +1609,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "meshGateway/Deployment: service-init init container consulServiceName can be changed" { +@test "meshGateway/Deployment: mesh-gateway-init init container consulServiceName can be changed" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -1418,7 +1617,7 @@ EOF --set 'connectInject.enabled=true' \ --set 'meshGateway.consulServiceName=new-name' \ . 
| tee /dev/stderr | - yq -r '.spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='consul-k8s-control-plane service-address \ -log-level=info \ diff --git a/charts/consul/test/unit/partition-init-job.bats b/charts/consul/test/unit/partition-init-job.bats index ca1a9a6d37..a907884afb 100644 --- a/charts/consul/test/unit/partition-init-job.bats +++ b/charts/consul/test/unit/partition-init-job.bats @@ -91,7 +91,7 @@ load _helpers actual=$(echo $command | jq -r '. | any(contains("-use-https"))' | tee /dev/stderr) [ "${actual}" = "true" ] - actual=$(echo $command | jq -r '. | any(contains("-consul-ca-cert=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + actual=$(echo $command | jq -r '. | any(contains("-ca-file=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) [ "${actual}" = "true" ] actual=$(echo $command | jq -r '. | any(contains("-server-port=8501"))' | tee /dev/stderr) @@ -113,7 +113,7 @@ load _helpers yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) local actual - actual=$(echo $command | jq -r '. | any(contains("-consul-ca-cert=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + actual=$(echo $command | jq -r '. | any(contains("-ca-file=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) [ "${actual}" = "false" ] } @@ -202,3 +202,323 @@ reservedNameTest() { [ "$status" -eq 1 ] [[ "$output" =~ "The name $name set for key global.adminPartitions.name is reserved by Consul for future use" ]] } + +#-------------------------------------------------------------------- +# Vault + +@test "partitionInit/Job: fails when vault and ACLs are enabled but adminPartitionsRole is not provided" { + cd `chart_dir` + run helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.enabled=false' \ + --set 'global.adminPartitions.enabled=true' \ + --set "global.adminPartitions.name=bar" \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=boot' \ + --set 'global.acls.bootstrapToken.secretKey=token' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=test' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.secretsBackend.vault.adminPartitionsRole is required when global.secretsBackend.vault.enabled and global.acls.manageSystemACLs are true." ]] +} + +@test "partitionInit/Job: configures vault annotations when ACLs are enabled but TLS disabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.enabled=false' \ + --set 'global.adminPartitions.enabled=true' \ + --set "global.adminPartitions.name=bar" \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.adminPartitionsRole=aprole' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclrole' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=foo' \ + --set 'global.acls.bootstrapToken.secretKey=bar' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + # Check annotations + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-pre-populate-only"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "aprole" ] + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-secret-bootstrap-token"') + [ "${actual}" = "foo" ] + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-template-bootstrap-token"') + local expected=$'{{- with secret \"foo\" -}}\n{{- .Data.data.bar -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + # Check that the bootstrap token flag is set to the path of the Vault secret. + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_HTTP_TOKEN_FILE").value') + [ "${actual}" = "/vault/secrets/bootstrap-token" ] + + # Check that no (secret) volumes are attached + local actual=$(echo $object | jq -r '.spec.volumes') + [ "${actual}" = "null" ] + + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").volumeMounts') + [ "${actual}" = "null" ] +} + +@test "partitionInit/Job: configures server CA to come from vault when vault and TLS are enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.enabled=false' \ + --set 'global.adminPartitions.enabled=true' \ + --set "global.adminPartitions.name=bar" \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + # Check annotations + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-pre-populate-only"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "carole" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr) + [ "${actual}" = "foo" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr) + [ "${actual}" = $'{{- with secret \"foo\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' ] + + # Check that the consul-ca-cert volume is not attached + local actual=$(echo $object | jq -r '.spec.volumes') + [ "${actual}" = "null" ] + + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").volumeMounts') + [ "${actual}" = "null" ] +} + +@test "partitionInit/Job: configures vault annotations when both ACLs and TLS are enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.enabled=false' \ + --set 'global.adminPartitions.enabled=true' \ + --set "global.adminPartitions.name=bar" \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclrole' \ + --set 'global.secretsBackend.vault.adminPartitionsRole=aprole' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=foo' \ + --set 'global.acls.bootstrapToken.secretKey=bar' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + # Check annotations + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-pre-populate-only"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "aprole" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr) + [ "${actual}" = "foo" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr) + [ "${actual}" = $'{{- with secret \"foo\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' ] + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-secret-bootstrap-token"') + [ "${actual}" = "foo" ] + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-template-bootstrap-token"') + local expected=$'{{- with secret \"foo\" -}}\n{{- .Data.data.bar -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + # Check that the bootstrap token flag is set to the path of the Vault secret. + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_HTTP_TOKEN_FILE").value') + [ "${actual}" = "/vault/secrets/bootstrap-token" ] + + # Check that the consul-ca-cert volume is not attached + local actual=$(echo $object | jq -r '.spec.volumes') + [ "${actual}" = "null" ] + + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").volumeMounts') + [ "${actual}" = "null" ] +} + +@test "partitionInit/Job: vault CA is not configured by default" { + cd `chart_dir` + local object=$(helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.enabled=false' \ + --set 'global.adminPartitions.enabled=true' \ + --set "global.adminPartitions.name=bar" \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "partitionInit/Job: vault CA is not configured when secretName is set but secretKey is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.enabled=false' \ + --set 'global.adminPartitions.enabled=true' \ + --set "global.adminPartitions.name=bar" \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "partitionInit/Job: vault CA is not configured when secretKey is set but secretName is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.enabled=false' \ + --set 'global.adminPartitions.enabled=true' \ + --set "global.adminPartitions.name=bar" \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "partitionInit/Job: vault CA is configured when both secretName and secretKey are set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.enabled=false' \ + --set 'global.adminPartitions.enabled=true' \ + --set "global.adminPartitions.name=bar" \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-extra-secret"') + [ "${actual}" = "ca" ] + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/ca-cert"') + [ "${actual}" = "/vault/custom/tls.crt" ] +} + +#-------------------------------------------------------------------- +# Vault agent annotations + +@test "partitionInit/Job: no vault agent annotations defined by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.enabled=false' \ + --set 'global.adminPartitions.enabled=true' \ + --set "global.adminPartitions.name=bar" \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclrole' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/agent-pre-populate-only") | del(."vault.hashicorp.com/role") | del(."vault.hashicorp.com/agent-inject-secret-serverca.crt") | del(."vault.hashicorp.com/agent-inject-template-serverca.crt")' | tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "partitionInit/Job: vault agent annotations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.enabled=false' \ + --set 'global.adminPartitions.enabled=true' \ + --set "global.adminPartitions.name=bar" \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclrole' \ + --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} diff --git a/charts/consul/test/unit/server-acl-init-job.bats b/charts/consul/test/unit/server-acl-init-job.bats index 7eb37a2329..0158dab7c6 100644 --- a/charts/consul/test/unit/server-acl-init-job.bats +++ b/charts/consul/test/unit/server-acl-init-job.bats @@ -99,25 +99,25 @@ load _helpers [[ "$output" =~ "global.bootstrapACLs was removed, use global.acls.manageSystemACLs instead" ]] } -@test "serverACLInit/Job: does not set -create-client-token=false when client is enabled (the default)" { +@test "serverACLInit/Job: does not set -client=false when client is enabled (the default)" { cd `chart_dir` local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].command[2] | contains("-create-client-token=false")' | + yq '.spec.template.spec.containers[0].command[2] | contains("-client=false")' | tee /dev/stderr) [ "${actual}" = "false" ] } -@test "serverACLInit/Job: sets -create-client-token=false when client is disabled" { +@test "serverACLInit/Job: sets -client=false when client is disabled" { cd `chart_dir` local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'client.enabled=false' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command[2] | contains("-create-client-token=false")' | + yq '.spec.template.spec.containers[0].command[2] | contains("-client=false")' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -225,7 +225,7 @@ load _helpers -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-create-snapshot-agent-token"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-snapshot-agent"))' | tee /dev/stderr) [ "${actual}" = "false" ] } @@ -236,7 +236,7 @@ load _helpers --set 'global.acls.manageSystemACLs=true' \ --set 'client.snapshotAgent.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-create-snapshot-agent-token"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-snapshot-agent"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -249,7 +249,7 @@ load _helpers -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-create-sync-token"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-sync-catalog"))' | tee /dev/stderr) [ "${actual}" = "false" ] } @@ -260,7 +260,7 @@ load _helpers --set 'global.acls.manageSystemACLs=true' \ --set 'syncCatalog.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-create-sync-token"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-sync-catalog"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -297,7 +297,7 @@ load _helpers --set 'global.acls.manageSystemACLs=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-create-mesh-gateway-token"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-mesh-gateway"))' | tee /dev/stderr) [ "${actual}" = "false" ] } @@ -309,7 +309,7 @@ load _helpers --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-create-mesh-gateway-token"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-mesh-gateway"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -571,11 +571,86 @@ load _helpers [ "${actual}" = "key" ] } -@test "serverACLInit/Job: configures server CA to come from vault when vault is enabled" { +#-------------------------------------------------------------------- +# Vault + +@test "serverACLInit/Job: fails when vault is enabled but neither bootstrap nor replication token is provided" { + cd `chart_dir` + run helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.acls.bootstrapToken or global.acls.replicationToken must be provided when global.secretsBackend.vault.enabled and global.acls.manageSystemACLs are true" ]] +} + +@test "serverACLInit/Job: fails when vault is enabled but manageSystemACLsRole is not provided" { + cd `chart_dir` + run helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=name' \ + --set 'global.acls.bootstrapToken.secretKey=key' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.secretsBackend.vault.manageSystemACLsRole is required when global.secretsBackend.vault.enabled and global.acls.manageSystemACLs are true" ]] +} + +@test "serverACLInit/Job: configures vault annotations and bootstrap token secret by default" { cd `chart_dir` local object=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=foo' \ + --set 'global.acls.bootstrapToken.secretKey=bar' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclrole' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + # Check annotations + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-pre-populate-only"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "aclrole" ] + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-secret-bootstrap-token"') + [ "${actual}" = "foo" ] + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-template-bootstrap-token"') + local expected=$'{{- with secret \"foo\" -}}\n{{- .Data.data.bar -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + # Check that the bootstrap token flag is set to the path of the Vault secret. 
+ local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-bootstrap-token-file=/vault/secrets/bootstrap-token"))') + [ "${actual}" = "true" ] + + # Check that no (secret) volumes are attached + local actual=$(echo $object | jq -r '.spec.volumes') + [ "${actual}" = "null" ] + + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts') + [ "${actual}" = "null" ] +} + +@test "serverACLInit/Job: configures server CA to come from vault when vault and TLS are enabled" { cd `chart_dir` local object=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=foo' \ + --set 'global.acls.bootstrapToken.secretKey=bar' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.server.serverCert.secretName=foo' \ @@ -584,6 +659,7 @@ load _helpers --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclrole' \ . | tee /dev/stderr | yq -r '.spec.template' | tee /dev/stderr) @@ -596,7 +672,7 @@ load _helpers [ "${actual}" = "true" ] local actual actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) - [ "${actual}" = "carole" ] + [ "${actual}" = "aclrole" ] local actual actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr) [ "${actual}" = "foo" ] @@ -608,7 +684,7 @@ load _helpers local actual=$(echo $object | jq -r '.spec.volumes') [ "${actual}" = "null" ] - local actual=$(echo $object | jq -r '.spec.containers[] | select(.name="post-install-job").volumeMounts') + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts') [ "${actual}" = "null" ] } @@ -617,6 +693,8 @@ load _helpers local object=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=foo' \ + --set 'global.acls.bootstrapToken.secretKey=bar' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ @@ -625,6 +703,7 @@ load _helpers --set 'global.secretsBackend.vault.consulClientRole=foo' \ --set 'global.secretsBackend.vault.consulServerRole=test' \ --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclrole' \ . | tee /dev/stderr | yq -r '.spec.template' | tee /dev/stderr) @@ -639,6 +718,8 @@ load _helpers local object=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=foo' \ + --set 'global.acls.bootstrapToken.secretKey=bar' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.server.serverCert.secretName=foo' \ @@ -648,6 +729,7 @@ load _helpers --set 'global.secretsBackend.vault.consulServerRole=test' \ --set 'global.secretsBackend.vault.consulCARole=carole' \ --set 'global.secretsBackend.vault.ca.secretName=ca' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclrole' \ . 
| tee /dev/stderr | yq -r '.spec.template' | tee /dev/stderr) @@ -662,6 +744,8 @@ load _helpers local object=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=foo' \ + --set 'global.acls.bootstrapToken.secretKey=bar' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ @@ -671,6 +755,7 @@ load _helpers --set 'global.secretsBackend.vault.consulServerRole=test' \ --set 'global.secretsBackend.vault.consulCARole=carole' \ --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclrole' \ . | tee /dev/stderr | yq -r '.spec.template' | tee /dev/stderr) @@ -685,6 +770,8 @@ load _helpers local object=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=foo' \ + --set 'global.acls.bootstrapToken.secretKey=bar' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ @@ -695,6 +782,7 @@ load _helpers --set 'global.secretsBackend.vault.consulCARole=carole' \ --set 'global.secretsBackend.vault.ca.secretName=ca' \ --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclrole' \ . | tee /dev/stderr | yq -r '.spec.template' | tee /dev/stderr) @@ -712,9 +800,6 @@ load _helpers local object=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ --set 'global.secretsBackend.vault.consulServerRole=test' \ @@ -738,36 +823,112 @@ load _helpers [ "${actual}" = "${expected}" ] # Check that replication token Kubernetes secret volumes and volumeMounts are not attached. + local actual=$(echo $object | jq -r '.spec.volumes') + [ "${actual}" = "null" ] + + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts') + [ "${actual}" = "null" ] + + # Check that the replication token flag is set to the path of the Vault secret. + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-acl-replication-token-file=/vault/secrets/replication-token"))') + [ "${actual}" = "true" ] +} + +@test "serverACLInit/Job: both replication and bootstrap tokens can be provided together" { + cd `chart_dir` + local object=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=acl-role' \ + --set 'global.acls.replicationToken.secretName=/vault/replication' \ + --set 'global.acls.replicationToken.secretKey=token' \ + --set 'global.acls.bootstrapToken.secretName=/vault/bootstrap' \ + --set 'global.acls.bootstrapToken.secretKey=token' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + # Check that the role is set. 
+ local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/role"') + [ "${actual}" = "acl-role" ] + + # Check Vault secret annotations. local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-secret-replication-token"') - [ "${actual}" = "/vault/secret" ] + [ "${actual}" = "/vault/replication" ] + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-template-replication-token"') + local expected=$'{{- with secret \"/vault/replication\" -}}\n{{- .Data.data.token -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-secret-bootstrap-token"') + [ "${actual}" = "/vault/bootstrap" ] + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-template-bootstrap-token"') + local expected=$'{{- with secret \"/vault/bootstrap\" -}}\n{{- .Data.data.token -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + # Check that replication token Kubernetes secret volumes and volumeMounts are not attached. local actual=$(echo $object | jq -r '.spec.volumes') [ "${actual}" = "null" ] - local actual=$(echo $object | jq -r '.spec.containers[] | select(.name="post-install-job").volumeMounts') + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts') [ "${actual}" = "null" ] - # Check that the replication token flag is set to the path of the Vault secret. - local actual=$(echo $object | jq -r '.spec.containers[] | select(.name="post-install-job").command | any(contains("-acl-replication-token-file=/vault/secrets/replication-token"))') + # Check that the replication and bootstrap token flags are set to the path of the Vault secret. + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-acl-replication-token-file=/vault/secrets/replication-token"))') + [ "${actual}" = "true" ] + + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-bootstrap-token-file=/vault/secrets/bootstrap-token"))') [ "${actual}" = "true" ] } -@test "serverACLInit/Job: manageSystemACLsRole is required when Vault is enabled and replication token is set" { +#-------------------------------------------------------------------- +# Partition token in Vault + +@test "serverACLInit/Job: vault partition token can be provided" { cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.acls.replicationToken.secretName=/vault/secret' \ - --set 'global.acls.replicationToken.secretKey=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.secretsBackend.vault.enabled=true' \ - --set 'global.secretsBackend.vault.consulClientRole=foo' \ - --set 'global.secretsBackend.vault.consulServerRole=test' \ - --set 'global.secretsBackend.vault.consulCARole=carole' . 
- [ "$status" -eq 1 ] - [[ "$output" =~ "global.secretsBackend.vault.manageSystemACLsRole must be set if global.secretsBackend.vault.enabled is true and global.acls.replicationToken is provided" ]] + local object=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=acl-role' \ + --set 'global.acls.bootstrapToken.secretName=/vault/boot' \ + --set 'global.acls.bootstrapToken.secretKey=token' \ + --set 'global.acls.partitionToken.secretName=/vault/secret' \ + --set 'global.acls.partitionToken.secretKey=token' \ + --set 'global.adminPartitions.enabled=true' \ + --set "global.adminPartitions.name=default" \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + # Check that the role is set. + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/role"') + [ "${actual}" = "acl-role" ] + + # Check Vault secret annotations. + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-secret-partition-token"') + [ "${actual}" = "/vault/secret" ] + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-template-partition-token"') + local expected=$'{{- with secret \"/vault/secret\" -}}\n{{- .Data.data.token -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + # Check that partition token Kubernetes secret volumes and volumeMounts are not attached. + local actual=$(echo $object | jq -r '.spec.volumes') + [ "${actual}" = "null" ] + + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts') + [ "${actual}" = "null" ] + + # Check that the partition token flag is set to the path of the Vault secret. + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-partition-token-file=/vault/secrets/partition-token"))') + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -778,13 +939,16 @@ load _helpers local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=foo' \ + --set 'global.acls.bootstrapToken.secretKey=bar' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclrole' \ . 
| tee /dev/stderr | - yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role")' | tee /dev/stderr) + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/agent-pre-populate-only") | del(."vault.hashicorp.com/role") | del(."vault.hashicorp.com/agent-inject-secret-bootstrap-token") | del(."vault.hashicorp.com/agent-inject-template-bootstrap-token")' | tee /dev/stderr) [ "${actual}" = "{}" ] } @@ -793,6 +957,8 @@ load _helpers local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=foo' \ + --set 'global.acls.bootstrapToken.secretKey=bar' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.enabled=true' \ @@ -800,6 +966,7 @@ load _helpers --set 'global.secretsBackend.vault.consulServerRole=foo' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclrole' \ --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) @@ -834,7 +1001,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$(echo $object | - yq 'any(contains("create-inject-token"))' | tee /dev/stderr) + yq 'any(contains("connect-inject"))' | tee /dev/stderr) [ "${actual}" = "false" ] local actual=$(echo $object | @@ -881,7 +1048,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$(echo $object | - yq 'any(contains("create-inject-token"))' | tee /dev/stderr) + yq 'any(contains("connect-inject"))' | tee /dev/stderr) [ "${actual}" = "false" ] local actual=$(echo $object | @@ -924,7 +1091,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$(echo $object | - yq 'any(contains("create-inject-token"))' | tee /dev/stderr) + yq 'any(contains("connect-inject"))' | tee /dev/stderr) [ "${actual}" = "false" ] local actual=$(echo $object | @@ -968,7 +1135,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$(echo $object | - yq 'any(contains("create-inject-token"))' | tee /dev/stderr) + yq 'any(contains("connect-inject"))' | tee /dev/stderr) [ "${actual}" = "false" ] local actual=$(echo $object | @@ -1013,7 +1180,7 @@ load _helpers [ "${actual}" = "true" ] local actual=$(echo $object | - yq 'any(contains("create-inject-token"))' | tee /dev/stderr) + yq 'any(contains("connect-inject"))' | tee /dev/stderr) [ "${actual}" = "false" ] local actual=$(echo $object | @@ -1060,7 +1227,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$(echo $object | - yq 'any(contains("create-inject-token"))' | tee /dev/stderr) + yq 'any(contains("connect-inject"))' | tee /dev/stderr) [ "${actual}" = "false" ] local actual=$(echo $object | @@ -1103,7 +1270,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$(echo $object | - yq 'any(contains("create-inject-token"))' | tee /dev/stderr) + yq 'any(contains("connect-inject"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | @@ -1147,7 +1314,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$(echo $object | - yq 'any(contains("create-inject-token"))' | tee /dev/stderr) + yq 'any(contains("connect-inject"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object 
| @@ -1192,7 +1359,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$(echo $object | - yq 'any(contains("create-inject-token"))' | tee /dev/stderr) + yq 'any(contains("connect-inject"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | @@ -1483,64 +1650,36 @@ load _helpers #-------------------------------------------------------------------- # global.acls.bootstrapToken -@test "serverACLInit/Job: -bootstrap-token-file is not set by default" { +@test "serverACLInit/Job: bootstrapToken.secretKey is required when bootstrapToken.secretName is set" { cd `chart_dir` - local object=$(helm template \ + run helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr) - - # Test the flag is not set. - local actual=$(echo "$object" | - yq '.spec.template.spec.containers[0].command | any(contains("-bootstrap-token-file"))' | tee /dev/stderr) - [ "${actual}" = "false" ] - - # Test the volume doesn't exist - local actual=$(echo "$object" | - yq '.spec.template.spec.volumes | length == 0' | tee /dev/stderr) - [ "${actual}" = "true" ] - - # Test the volume mount doesn't exist - local actual=$(echo "$object" | - yq '.spec.template.spec.containers[0].volumeMounts | length == 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + --set 'global.acls.bootstrapToken.secretName=name' \ . + [ "$status" -eq 1 ] + [[ "$output" =~ "both global.acls.bootstrapToken.secretKey and global.acls.bootstrapToken.secretName must be set if one of them is provided" ]] } -@test "serverACLInit/Job: -bootstrap-token-file is not set when acls.bootstrapToken.secretName is set but secretKey is not" { +@test "serverACLInit/Job: bootstrapToken.secretName is required when bootstrapToken.secretKey is set" { cd `chart_dir` - local object=$(helm template \ + run helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.acls.bootstrapToken.secretName=name' \ - . | tee /dev/stderr) - - # Test the flag is not set. - local actual=$(echo "$object" | - yq '.spec.template.spec.containers[0].command | any(contains("-bootstrap-token-file"))' | tee /dev/stderr) - [ "${actual}" = "false" ] - - # Test the volume doesn't exist - local actual=$(echo "$object" | - yq '.spec.template.spec.volumes | length == 0' | tee /dev/stderr) - [ "${actual}" = "true" ] - - # Test the volume mount doesn't exist - local actual=$(echo "$object" | - yq '.spec.template.spec.containers[0].volumeMounts | length == 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + --set 'global.acls.bootstrapToken.secretKey=key' \ . + [ "$status" -eq 1 ] + [[ "$output" =~ "both global.acls.bootstrapToken.secretKey and global.acls.bootstrapToken.secretName must be set if one of them is provided" ]] } -@test "serverACLInit/Job: -bootstrap-token-file is not set when acls.bootstrapToken.secretKey is set but secretName is not" { +@test "serverACLInit/Job: -bootstrap-token-file is not set by default" { cd `chart_dir` local object=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.acls.bootstrapToken.secretKey=key' \ . | tee /dev/stderr) # Test the flag is not set. 
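  # The flag is only expected once both bootstrapToken values are supplied, e.g.
  # (a sketch; the secret name and key below are placeholders, not chart defaults):
  #
  #   helm template -s templates/server-acl-init-job.yaml \
  #     --set 'global.acls.manageSystemACLs=true' \
  #     --set 'global.acls.bootstrapToken.secretName=bootstrap-token' \
  #     --set 'global.acls.bootstrapToken.secretKey=token' . \
  #     | yq '.spec.template.spec.containers[0].command | any(contains("-bootstrap-token-file"))'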
local actual=$(echo "$object" | - yq '.spec.template.spec.containers[0].command | any(contains("-bootstrap-token-file"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-bootstrap-token-file"))' | tee /dev/stderr) [ "${actual}" = "false" ] # Test the volume doesn't exist @@ -1613,7 +1752,7 @@ load _helpers --set 'global.acls.manageSystemACLs=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-inject-auth-method-host"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-auth-method-host"))' | tee /dev/stderr) [ "${actual}" = "false" ] } @@ -1625,11 +1764,11 @@ load _helpers --set 'externalServers.k8sAuthMethodHost=foo.com' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-inject-auth-method-host"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-auth-method-host"))' | tee /dev/stderr) [ "${actual}" = "false" ] } -@test "serverACLInit/Job: can provide custom auth method host" { +@test "serverACLInit/Job: can provide custom auth method host for external servers" { cd `chart_dir` local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ @@ -1640,31 +1779,48 @@ load _helpers --set 'externalServers.hosts[0]=foo.com' \ --set 'externalServers.k8sAuthMethodHost=foo.com' \ . | tee /dev/stderr| - yq '.spec.template.spec.containers[0].command | any(contains("-inject-auth-method-host=foo.com"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-auth-method-host=foo.com"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInit/Job: can provide custom auth method host for federation" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + --set 'global.federation.k8sAuthMethodHost=foo.com' \ + --set 'meshGateway.enabled=true' \ + . | tee /dev/stderr| + yq '.spec.template.spec.containers[0].command | any(contains("-auth-method-host=foo.com"))' | tee /dev/stderr) [ "${actual}" = "true" ] } #-------------------------------------------------------------------- # controller -@test "serverACLInit/Job: -create-controller-token not set by default" { +@test "serverACLInit/Job: -controller not set by default" { cd `chart_dir` local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("create-controller-token"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("controller"))' | tee /dev/stderr) [ "${actual}" = "false" ] } -@test "serverACLInit/Job: -create-controller-token set when controller.enabled=true" { +@test "serverACLInit/Job: -controller set when controller.enabled=true" { cd `chart_dir` local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'controller.enabled=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("create-controller-token"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("controller"))' | tee /dev/stderr) [ "${actual}" = "true" ] } diff --git a/charts/consul/test/unit/server-acl-init-role.bats b/charts/consul/test/unit/server-acl-init-role.bats index 9d8d8f4573..92cdd78d16 100644 --- a/charts/consul/test/unit/server-acl-init-role.bats +++ b/charts/consul/test/unit/server-acl-init-role.bats @@ -53,14 +53,13 @@ load _helpers } #-------------------------------------------------------------------- -# connectInject.enabled +# manageSystemACLs.enabled -@test "serverACLInit/Role: allows service accounts when connectInject.enabled is true" { +@test "serverACLInit/Role: allows service accounts when manageSystemACLs.enabled is true" { cd `chart_dir` local actual=$(helm template \ -s templates/server-acl-init-role.yaml \ --set 'global.acls.manageSystemACLs=true' \ - --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq -r '.rules | map(select(.resources[0] == "serviceaccounts")) | length' | tee /dev/stderr) [ "${actual}" = "1" ] diff --git a/charts/consul/test/unit/server-statefulset.bats b/charts/consul/test/unit/server-statefulset.bats index d26389983b..24b811e19f 100755 --- a/charts/consul/test/unit/server-statefulset.bats +++ b/charts/consul/test/unit/server-statefulset.bats @@ -1228,6 +1228,68 @@ load _helpers [ "${actual}" = "false" ] } +#-------------------------------------------------------------------- +# global.acls.bootstrapToken + +@test "server/StatefulSet: when global.acls.bootstrapToken.secretKey!=null and global.acls.bootstrapToken.secretName=null, fail" { + cd `chart_dir` + run helm template \ + -s templates/server-statefulset.yaml \ + --set 'global.acls.bootstrapToken.secretName=' \ + --set 'global.acls.bootstrapToken.secretKey=enterpriselicense' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "both global.acls.bootstrapToken.secretKey and global.acls.bootstrapToken.secretName must be set if one of them is provided." ]] +} + +@test "server/StatefulSet: when global.acls.bootstrapToken.secretName!=null and global.acls.bootstrapToken.secretKey=null, fail" { + cd `chart_dir` + run helm template \ + -s templates/server-statefulset.yaml \ + --set 'global.acls.bootstrapToken.secretName=foo' \ + --set 'global.acls.bootstrapToken.secretKey=' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "both global.acls.bootstrapToken.secretKey and global.acls.bootstrapToken.secretName must be set if one of them is provided." ]] +} + +@test "server/StatefulSet: acl bootstrap token config is not set by default" { + cd `chart_dir` + local object=$(helm template \ + -s templates/server-statefulset.yaml \ + . | tee /dev/stderr) + + # Test the flag is not set. + local actual=$(echo "$object" | + yq '.spec.template.spec.containers[0].command | any(contains("ACL_BOOTSTRAP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + # Test the ACL_BOOTSTRAP_TOKEN environment variable is not set. 
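+ # When a bootstrap token secret is configured (see the next test), the rendered
+ # server command is expected to gain an extra HCL fragment plus a matching env
+ # entry, roughly of this shape (a sketch assembled from the assertions in these
+ # tests; "name" and "key" mirror the secretName/secretKey used there):
+ #
+ #   -hcl="acl { tokens { initial_management = \"${ACL_BOOTSTRAP_TOKEN}\" } }"
+ #   env:
+ #   - name: ACL_BOOTSTRAP_TOKEN
+ #     valueFrom:
+ #       secretKeyRef:
+ #         name: name
+ #         key: key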
+ local actual=$(echo "$object" | + yq '.spec.template.spec.containers[0].env | map(select(.name == "ACL_BOOTSTRAP_TOKEN")) | length == 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/StatefulSet: acl bootstrap token config is set when acls.bootstrapToken.secretKey and secretName are set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/server-statefulset.yaml \ + --set 'global.acls.enabled=true' \ + --set 'global.acls.bootstrapToken.secretName=name' \ + --set 'global.acls.bootstrapToken.secretKey=key' \ + . | tee /dev/stderr) + + # Test the flag is set. + local actual=$(echo "$object" | + yq '.spec.template.spec.containers[0].command | any(contains("-hcl=\"acl { tokens { initial_management = \\\"${ACL_BOOTSTRAP_TOKEN}\\\" } }\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + # Test the ACL_BOOTSTRAP_TOKEN environment variable is set. + local actual=$(echo "$object" | + yq -r -c '.spec.template.spec.containers[0].env | map(select(.name == "ACL_BOOTSTRAP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = '[{"name":"ACL_BOOTSTRAP_TOKEN","valueFrom":{"secretKeyRef":{"name":"name","key":"key"}}}]' ] +} + #-------------------------------------------------------------------- # global.acls.replicationToken @@ -1786,7 +1848,7 @@ load _helpers local actual=$(echo $object | yq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-servercert.crt"]' | tee /dev/stderr) - local expected=$'{{- with secret \"pki_int/issue/test\" \"common_name=server.dc2.consul\"\n\"alt_names=localhost,RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server.default,*.RELEASE-NAME-consul-server.default.svc,*.server.dc2.consul\" \"ip_sans=127.0.0.1\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' + local expected=$'{{- with secret \"pki_int/issue/test\" \"common_name=server.dc2.consul\"\n\"alt_names=localhost,RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server.default,RELEASE-NAME-consul-server.default,*.RELEASE-NAME-consul-server.default.svc,RELEASE-NAME-consul-server.default.svc,*.server.dc2.consul\" \"ip_sans=127.0.0.1\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' [ "${actual}" = "${expected}" ] local actual="$(echo $object | @@ -1795,7 +1857,7 @@ load _helpers local actual="$(echo $object | yq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-servercert.key"]' | tee /dev/stderr)" - local expected=$'{{- with secret \"pki_int/issue/test\" \"common_name=server.dc2.consul\"\n\"alt_names=localhost,RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server.default,*.RELEASE-NAME-consul-server.default.svc,*.server.dc2.consul\" \"ip_sans=127.0.0.1\" -}}\n{{- .Data.private_key -}}\n{{- end -}}' + local expected=$'{{- with secret \"pki_int/issue/test\" \"common_name=server.dc2.consul\"\n\"alt_names=localhost,RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server.default,RELEASE-NAME-consul-server.default,*.RELEASE-NAME-consul-server.default.svc,RELEASE-NAME-consul-server.default.svc,*.server.dc2.consul\" \"ip_sans=127.0.0.1\" -}}\n{{- .Data.private_key -}}\n{{- end -}}' [ "${actual}" = "${expected}" ] local actual=$(echo $object | @@ -1846,12 +1908,12 @@ load _helpers local actual=$(echo $object | yq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-servercert.crt"]' | tee /dev/stderr) - local expected=$'{{- with secret \"pki_int/issue/test\" 
\"common_name=server.dc2.consul\"\n\"alt_names=localhost,RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server.default,*.RELEASE-NAME-consul-server.default.svc,*.server.dc2.consul,*.foo.com,*.bar.com\" \"ip_sans=127.0.0.1\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' + local expected=$'{{- with secret \"pki_int/issue/test\" \"common_name=server.dc2.consul\"\n\"alt_names=localhost,RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server.default,RELEASE-NAME-consul-server.default,*.RELEASE-NAME-consul-server.default.svc,RELEASE-NAME-consul-server.default.svc,*.server.dc2.consul,*.foo.com,*.bar.com\" \"ip_sans=127.0.0.1\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' [ "${actual}" = "${expected}" ] local actual="$(echo $object | yq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-servercert.key"]' | tee /dev/stderr)" - local expected=$'{{- with secret \"pki_int/issue/test\" \"common_name=server.dc2.consul\"\n\"alt_names=localhost,RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server.default,*.RELEASE-NAME-consul-server.default.svc,*.server.dc2.consul,*.foo.com,*.bar.com\" \"ip_sans=127.0.0.1\" -}}\n{{- .Data.private_key -}}\n{{- end -}}' + local expected=$'{{- with secret \"pki_int/issue/test\" \"common_name=server.dc2.consul\"\n\"alt_names=localhost,RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server.default,RELEASE-NAME-consul-server.default,*.RELEASE-NAME-consul-server.default.svc,RELEASE-NAME-consul-server.default.svc,*.server.dc2.consul,*.foo.com,*.bar.com\" \"ip_sans=127.0.0.1\" -}}\n{{- .Data.private_key -}}\n{{- end -}}' [ "${actual}" = "${expected}" ] } @@ -1875,12 +1937,12 @@ load _helpers local actual=$(echo $object | yq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-servercert.crt"]' | tee /dev/stderr) - local expected=$'{{- with secret \"pki_int/issue/test\" \"common_name=server.dc2.consul\"\n\"alt_names=localhost,RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server.default,*.RELEASE-NAME-consul-server.default.svc,*.server.dc2.consul\" \"ip_sans=127.0.0.1,1.1.1.1,2.2.2.2\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' + local expected=$'{{- with secret \"pki_int/issue/test\" \"common_name=server.dc2.consul\"\n\"alt_names=localhost,RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server.default,RELEASE-NAME-consul-server.default,*.RELEASE-NAME-consul-server.default.svc,RELEASE-NAME-consul-server.default.svc,*.server.dc2.consul\" \"ip_sans=127.0.0.1,1.1.1.1,2.2.2.2\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' [ "${actual}" = "${expected}" ] local actual="$(echo $object | yq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-servercert.key"]' | tee /dev/stderr)" - local expected=$'{{- with secret \"pki_int/issue/test\" \"common_name=server.dc2.consul\"\n\"alt_names=localhost,RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server.default,*.RELEASE-NAME-consul-server.default.svc,*.server.dc2.consul\" \"ip_sans=127.0.0.1,1.1.1.1,2.2.2.2\" -}}\n{{- .Data.private_key -}}\n{{- end -}}' + local expected=$'{{- with secret \"pki_int/issue/test\" 
\"common_name=server.dc2.consul\"\n\"alt_names=localhost,RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server,*.RELEASE-NAME-consul-server.default,RELEASE-NAME-consul-server.default,*.RELEASE-NAME-consul-server.default.svc,RELEASE-NAME-consul-server.default.svc,*.server.dc2.consul\" \"ip_sans=127.0.0.1,1.1.1.1,2.2.2.2\" -}}\n{{- .Data.private_key -}}\n{{- end -}}' [ "${actual}" = "${expected}" ] } @@ -1981,6 +2043,38 @@ load _helpers [ "${actual}" = "bar" ] } +#-------------------------------------------------------------------- +# Vault bootstrap token + +@test "server/StatefulSet: vault bootstrap token is configured when secret provided" { + cd `chart_dir` + local object=$(helm template \ + -s templates/server-statefulset.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.manageSystemACLsRole=aclrole' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.acls.bootstrapToken.secretName=vault/bootstrap-token' \ + --set 'global.acls.bootstrapToken.secretKey=token' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + # Check that Vault annotations are set. + local actual="$(echo $object | + yq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-secret-bootstrap-token-config.hcl"]' | tee /dev/stderr)" + [ "${actual}" = "vault/bootstrap-token" ] + + local actual="$(echo $object | + yq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-bootstrap-token-config.hcl"]' | tee /dev/stderr)" + local expected=$'{{- with secret \"vault/bootstrap-token\" -}}\nacl { tokens { initial_management = \"{{- .Data.data.token -}}\" }}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + # Check that path to Vault secret config is provided to the command. + local actual="$(echo $object | yq -r '.spec.containers[] | select(.name=="consul").command | any(contains("-config-file=/vault/secrets/bootstrap-token-config.hcl"))' | tee /dev/stderr)" + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # Vault replication token diff --git a/charts/consul/test/unit/sync-catalog-clusterrole.bats b/charts/consul/test/unit/sync-catalog-clusterrole.bats index 0688db9b93..17141e434f 100755 --- a/charts/consul/test/unit/sync-catalog-clusterrole.bats +++ b/charts/consul/test/unit/sync-catalog-clusterrole.bats @@ -60,20 +60,6 @@ load _helpers [ "${actual}" = "podsecuritypolicies" ] } -#-------------------------------------------------------------------- -# global.acls.manageSystemACLs - -@test "syncCatalog/ClusterRole: allows secret access with global.acls.manageSystemACLs=true" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/sync-catalog-clusterrole.yaml \ - --set 'syncCatalog.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . 
| tee /dev/stderr | - yq -r '.rules[2].resources[0]' | tee /dev/stderr) - [ "${actual}" = "secrets" ] -} - #-------------------------------------------------------------------- # syncCatalog.toK8S={true,false} diff --git a/charts/consul/test/unit/sync-catalog-deployment.bats b/charts/consul/test/unit/sync-catalog-deployment.bats index 8beead1564..18171942af 100755 --- a/charts/consul/test/unit/sync-catalog-deployment.bats +++ b/charts/consul/test/unit/sync-catalog-deployment.bats @@ -421,33 +421,243 @@ load _helpers #-------------------------------------------------------------------- # global.acls.manageSystemACLs -@test "syncCatalog/Deployment: CONSUL_HTTP_TOKEN env variable created when global.acls.manageSystemACLs=true" { +@test "syncCatalog/Deployment: consul-logout preStop hook is added when ACLs are enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul-k8s-control-plane consul-logout"))' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls disabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "sync-catalog-acl-init" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true" { +@test "syncCatalog/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { cd `chart_dir` local object=$(helm template \ -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) local actual=$(echo $object | yq -r '.name' | tee /dev/stderr) - [ "${actual}" = "sync-acl-init" ] + [ "${actual}" = "get-auto-encrypt-client-ca" ] +} + +@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when in non-primary datacenter with Consul Namespaces disabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.datacenter=dc2' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when in non-primary datacenter with Consul Namespaces enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.datacenter=dc2' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr) local actual=$(echo $object | yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- diff --git a/charts/consul/test/unit/terminating-gateways-deployment.bats b/charts/consul/test/unit/terminating-gateways-deployment.bats index 4a23a232ba..2d6fe575d3 100644 --- a/charts/consul/test/unit/terminating-gateways-deployment.bats +++ b/charts/consul/test/unit/terminating-gateways-deployment.bats @@ -25,6 +25,29 @@ load _helpers [ "${actual}" = "RELEASE-NAME-consul-terminating-gateway" ] } +@test "terminatingGateways/Deployment: Adds consul service volumeMount to gateway container" { + cd `chart_dir` + local object=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'terminatingGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | yq '.spec.template.spec.containers[0].volumeMounts[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "consul-service" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "/consul/service" ] + + local actual=$(echo $object | + yq -r '.readOnly' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # prerequisites @@ -72,6 +95,37 @@ load _helpers [[ "$output" =~ "clients must be enabled" ]] } +@test "terminatingGateways/Deployment: fails if there are duplicate gateway names" { + cd `chart_dir` + run helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'terminatingGateways.gateways[0].name=foo' \ + --set 'terminatingGateways.gateways[1].name=foo' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=true' \ + --set 'client.enabled=true' . + echo "status: $output" + [ "$status" -eq 1 ] + [[ "$output" =~ "terminating gateways must have unique names but found duplicate name foo" ]] +} + +@test "terminatingGateways/Deployment: fails if a terminating gateway has the same name as an ingress gateway" { + cd `chart_dir` + run helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'ingressGateways.enabled=true' \ + --set 'terminatingGateways.gateways[0].name=foo' \ + --set 'ingressGateways.gateways[0].name=foo' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=true' \ + --set 'client.enabled=true' . 
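+ # Roughly the same failure can be reproduced from a values file instead of
+ # --set flags (the file name and gateway names here are illustrative only):
+ #
+ #   cat > dup-gateways.yaml << EOF
+ #   terminatingGateways:
+ #     enabled: true
+ #     gateways:
+ #       - name: foo
+ #   ingressGateways:
+ #     enabled: true
+ #     gateways:
+ #       - name: foo
+ #   EOF
+ #   helm template -s templates/terminating-gateways-deployment.yaml \
+ #     --set 'connectInject.enabled=true' --set 'global.enabled=true' \
+ #     --set 'client.enabled=true' -f dup-gateways.yaml .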
+ echo "status: $output" + [ "$status" -eq 1 ] + [[ "$output" =~ "terminating gateways cannot have duplicate names of any ingress gateways but found duplicate name foo" ]] +} + #-------------------------------------------------------------------- # envoyImage @@ -231,10 +285,24 @@ load _helpers [ "${actual}" = "" ] } +@test "terminatingGateways/Deployment: serviceAccountName is set properly" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'terminatingGateways.defaults.consulNamespace=namespace' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.serviceAccountName' | tee /dev/stderr) + + [ "${actual}" = "RELEASE-NAME-consul-terminating-gateway" ] +} + #-------------------------------------------------------------------- # global.acls.manageSystemACLs -@test "terminatingGateways/Deployment: CONSUL_HTTP_TOKEN env variable created when global.acls.manageSystemACLs=true" { +@test "terminatingGateways/Deployment: consul-sidecar uses -token-file flag when global.acls.manageSystemACLs=true" { cd `chart_dir` local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ @@ -242,19 +310,104 @@ load _helpers --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq -s '[.[0].spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + yq -s '.[0].spec.template.spec.containers[1].command | any(contains("-token-file=/consul/service/acl-token"))' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "terminatingGateways/Deployment: consul-sidecar uses -token-file flag when global.acls.manageSystemACLs=true" { +@test "terminatingGateways/Deployment: Adds consul envvars CONSUL_HTTP_ADDR on terminating-gateway-init init container when ACLs are enabled and tls is enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "https://\$(HOST_IP):8501" ] +} + +@test "terminatingGateways/Deployment: Adds consul envvars CONSUL_HTTP_ADDR on terminating-gateway-init init container when ACLs are enabled and tls is not enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'terminatingGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. 
| select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "http://\$(HOST_IP):8500" ] +} + +@test "terminatingGateways/Deployment: Does not add consul envvars CONSUL_CACERT on terminating-gateway-init init container when ACLs are enabled and tls is not enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ + --set 'connectInject.enabled=true' \ --set 'terminatingGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1].env[] | select(.name == "CONSUL_CACERT")' | tee /dev/stderr) + + [ "${actual}" = "" ] +} + +@test "terminatingGateways/Deployment: Adds consul envvars CONSUL_CACERT on terminating-gateway-init init container when ACLs are enabled and tls is enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ --set 'connectInject.enabled=true' \ + --set 'terminatingGateways.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq -s '.[0].spec.template.spec.containers[1].command | any(contains("-token-file=/consul/service/acl-token"))' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr) + + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + +@test "terminatingGateways/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'terminatingGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=false' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "terminatingGateways/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -s '[.[0].spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "terminatingGateways/Deployment: consul-logout preStop hook is added when ACLs are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'terminatingGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[3]] | any(contains("/consul-bin/consul logout"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1005,16 +1158,16 @@ key2: value2' \ } #-------------------------------------------------------------------- -# service-init init container command +# terminating-gateway-init init container command -@test "terminatingGateways/Deployment: service-init init container defaults" { +@test "terminatingGateways/Deployment: terminating-gateway-init init container defaults" { cd `chart_dir` local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "terminating-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp=' cat > /consul/service/service.hcl << EOF @@ -1041,25 +1194,28 @@ EOF [ "${actual}" = "${exp}" ] } -@test "terminatingGateways/Deployment: service-init init container with acls.manageSystemACLs=true" { +@test "terminatingGateways/Deployment: terminating-gateway-init init container with acls.manageSystemACLs=true" { cd `chart_dir` local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ + --set 'terminatingGateways.gateways[0].name=terminating' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "terminating-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp='consul-k8s-control-plane acl-init \ - -secret-name="RELEASE-NAME-consul-terminating-gateway-terminating-gateway-acl-token" \ - -k8s-namespace=default \ - -token-sink-file=/consul/service/acl-token + -component-name=terminating-gateway/RELEASE-NAME-consul-terminating \ + -acl-auth-method=RELEASE-NAME-consul-k8s-component-auth-method \ + -token-sink-file=/consul/service/acl-token \ + -log-level=info \ + -log-json=false cat > /consul/service/service.hcl << EOF service { kind = "terminating-gateway" - name = "terminating-gateway" + name = "terminating" id = "${POD_NAME}" address = "${POD_IP}" port = 8443 @@ -1081,7 +1237,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "terminatingGateways/Deployment: service-init init container gateway namespace can be specified through defaults" { +@test "terminatingGateways/Deployment: terminating-gateway-init init container gateway namespace can be specified through defaults" { cd `chart_dir` local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ @@ -1090,7 +1246,7 @@ EOF --set 'global.enableConsulNamespaces=true' \ --set 'terminatingGateways.defaults.consulNamespace=namespace' \ . 
| tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "terminating-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp=' cat > /consul/service/service.hcl << EOF @@ -1118,7 +1274,7 @@ EOF [ "${actual}" = "${exp}" ] } -@test "terminatingGateways/Deployment: service-init init container gateway namespace can be specified through specific gateway overriding defaults" { +@test "terminatingGateways/Deployment: terminating-gateway-init init container gateway namespace can be specified through specific gateway overriding defaults" { cd `chart_dir` local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ @@ -1129,7 +1285,7 @@ EOF --set 'terminatingGateways.gateways[0].name=terminating-gateway' \ --set 'terminatingGateways.gateways[0].consulNamespace=new-namespace' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "service-init"))[0] | .command[2]' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "terminating-gateway-init"))[0] | .command[2]' | tee /dev/stderr) exp=' cat > /consul/service/service.hcl << EOF diff --git a/charts/consul/test/unit/terminating-gateways-role.bats b/charts/consul/test/unit/terminating-gateways-role.bats index a5c535e53a..708df66a6c 100644 --- a/charts/consul/test/unit/terminating-gateways-role.bats +++ b/charts/consul/test/unit/terminating-gateways-role.bats @@ -46,7 +46,7 @@ load _helpers [ "${actual}" = "secrets" ] local actual=$(echo $object | yq -r '.resourceNames[0]' | tee /dev/stderr) - [ "${actual}" = "RELEASE-NAME-consul-terminating-gateway-terminating-gateway-acl-token" ] + [ "${actual}" = "RELEASE-NAME-consul-terminating-gateway-acl-token" ] } @test "terminatingGateways/Role: rules is empty if no ACLs, PSPs" { diff --git a/charts/consul/test/unit/tls-init-job.bats b/charts/consul/test/unit/tls-init-job.bats index 8db52fcfbd..bf4d116574 100644 --- a/charts/consul/test/unit/tls-init-job.bats +++ b/charts/consul/test/unit/tls-init-job.bats @@ -70,6 +70,37 @@ load _helpers [ "${actual}" = "true" ] } +@test "tlsInit/Job: sets additional DNS SANs by default when global.tls.enabled=true" { + cd `chart_dir` + local command=$(helm template \ + -s templates/tls-init-job.yaml \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$command" | + yq 'any(contains("additional-dnsname=\"RELEASE-NAME-consul-server\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual=$(echo "$command" | + yq 'any(contains("additional-dnsname=\"*.RELEASE-NAME-consul-server\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual=$(echo "$command" | + yq 'any(contains("additional-dnsname=\"*.RELEASE-NAME-consul-server.${NAMESPACE}\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual=$(echo "$command" | + yq 'any(contains("additional-dnsname=\"RELEASE-NAME-consul-server.${NAMESPACE}\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual=$(echo "$command" | + yq 'any(contains("additional-dnsname=\"*.RELEASE-NAME-consul-server.${NAMESPACE}.svc\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual=$(echo "$command" | + yq 'any(contains("additional-dnsname=\"RELEASE-NAME-consul-server.${NAMESPACE}.svc\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual=$(echo "$command" | + yq 'any(contains("additional-dnsname=\"*.server.dc1.consul\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + @test "tlsInit/Job: sets additional DNS SANs when provided and global.tls.enabled=true" { cd `chart_dir` local actual=$(helm template \ diff --git a/charts/consul/values.yaml b/charts/consul/values.yaml index 4d019b36be..5bce7c52eb 100644 --- a/charts/consul/values.yaml +++ b/charts/consul/values.yaml @@ -38,10 +38,12 @@ global: # must be installed in the default partition. Creation of Admin Partitions is only supported during installation. # Admin Partitions cannot be installed via a Helm upgrade operation. Only Helm installs are supported. enabled: false + # The name of the Admin Partition. The partition name cannot be modified once the partition has been installed. # Changing the partition name would require an un-install and a re-install with the updated name. # Must be "default" in the server cluster ie the Kubernetes cluster that the Consul server pods are deployed onto. name: "default" + # Partition service properties. service: type: LoadBalancer @@ -85,7 +87,7 @@ global: # image: "hashicorp/consul-enterprise:1.10.0-ent" # ``` # @default: hashicorp/consul: - image: "hashicorp/consul:1.11.3" + image: "hashicorp/consul:1.11.4" # Array of objects containing image pull secret names that will be applied to each service account. # This can be used to reference image pull secrets if using a custom consul or consul-k8s-control-plane Docker image. @@ -155,21 +157,42 @@ global: # - gossip encryption key defined by `global.gossipEncryption.secretName`. # To discover the service account name of the Consul client, run # ```shell-session - # $ helm template --show-only templates/client-serviceaccount.yaml charts/consul + # $ helm template --show-only templates/client-serviceaccount.yaml hashicorp/consul # ``` # and check the name of `metadata.name`. consulClientRole: "" + # [Enterprise Only] The Vault role for the Consul client snapshot agent. + # The role must be connected to the Consul client snapshot agent's service account and + # have a policy with read capabilities for the snapshot agent config defined by `client.snapshotAgent.configSecret.secretName`. 
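+ # For example, if the snapshot agent config lives at the Vault KV v2 path
+ # `consul-kv/data/secret/snapshot-agent-config` (an illustrative path, not a chart default),
+ # the role needs a policy along these lines:
+ # ```shell-session
+ # $ vault policy write snapshot-agent-config-policy - << EOF
+ # path "consul-kv/data/secret/snapshot-agent-config" {
+ #   capabilities = ["read"]
+ # }
+ # EOF
+ # ```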
+ # To discover the service account name of the Consul client snapshot agent, run + # ```shell-session + # $ helm template --show-only templates/client-snapshot-agent-serviceaccount.yaml --set client.snapshotAgent.enabled=true hashicorp/consul + # ``` + # and check the name of `metadata.name`. + consulSnapshotAgentRole: "" + # A Vault role to allow Kubernetes job that manages ACLs for this Helm chart (`server-acl-init`) - # to read and update Vault secrets for the Consul's bootstrap and replication tokens. + # to read and update Vault secrets for Consul's bootstrap, replication, or partition tokens. # This role must be bound the `server-acl-init`'s service account. # To discover the service account name of the `server-acl-init` job, run # ```shell-session - # $ helm template --show-only templates/server-acl-init-serviceaccount.yaml charts/consul + # $ helm template --show-only templates/server-acl-init-serviceaccount.yaml \ + # --set global.acls.manageSystemACLs=true hashicorp/consul # ``` + # and check the name of `metadata.name`. manageSystemACLsRole: "" + # [Enterprise Only] A Vault role to allow the Kubernetes job that creates a Consul partition for this Helm chart (`partition-init`) + # to read the Vault secret for the partition ACL token. + # This role must be bound to the `partition-init`'s service account. + # To discover the service account name of the `partition-init` job, run with Helm values for the client cluster: + # ```shell-session + # $ helm template --show-only templates/partition-init-serviceaccount.yaml -f client-cluster-values.yaml hashicorp/consul + # ``` + # and check the name of `metadata.name`. + adminPartitionsRole: "" + # This value defines additional annotations for # Vault agent on any pods where it'll be running. # This should be formatted as a multi-line string. @@ -397,6 +420,18 @@ global: # The key of the Kubernetes or Vault secret. secretKey: null + # partitionToken references a Vault secret containing the ACL token to be used in non-default partitions. + # This value should only be provided in the default partition and only when setting + # `global.secretsBackend.vault.enabled` to true. + # We will use the value of the secret stored in Vault to create an ACL token in Consul with the value of the + # secret as the secretID for the token. + # In non-default partitions, set this secret as the `bootstrapToken`. + partitionToken: + # The path of the secret in Vault. + secretName: null + # The key of the Vault secret. + secretKey: null + # [Enterprise Only] This value refers to a Kubernetes secret that you have created # that contains your enterprise license. It is required if you are using an # enterprise binary. Defining it here applies it to your cluster once a leader @@ -431,13 +466,31 @@ global: createFederationSecret: false # The name of the primary datacenter. - primaryDatacenter: "" + # @type: string + primaryDatacenter: null # A list of addresses of the primary mesh gateways in the form `<ip>:<port>`. - # (e.g. ["1.1.1.1:443", "2.3.4.5:443"] + # (e.g. ["1.1.1.1:443", "2.3.4.5:443"]) # @type: array primaryGateways: [] + # If you are setting `global.federation.enabled` to true and are in a secondary datacenter, + # set `k8sAuthMethodHost` to the address of the Kubernetes API server of the secondary datacenter. + # This address must be reachable from the Consul servers in the primary datacenter. + # This authmethod will be used to provision ACL tokens for Consul components and is different + # from the one used by the Consul Service Mesh.
+ # Please see the Kubernetes Auth Method documentation (https://consul.io/docs/acl/auth-methods/kubernetes). + # + # You could retrieve this value from your `kubeconfig` by running: + # + # ```shell-session + # $ kubectl config view \ + # -o jsonpath="{.clusters[?(@.name=='<cluster-name>')].cluster.server}" + # ``` + # + # @type: string + k8sAuthMethodHost: null + # Configures metrics for Consul service mesh metrics: # Configures the Helm chart’s components @@ -1225,15 +1278,15 @@ client: # The number of snapshot agents to run. replicas: 2 - # A Kubernetes secret that should be manually created to contain the entire + # A Kubernetes or Vault secret that should be manually created to contain the entire # config to be used on the snapshot agent. # This is the preferred method of configuration since there are usually storage # credentials present. Please see Snapshot agent config (https://consul.io/commands/snapshot/agent#config-file-options) # for details. configSecret: - # The name of the Kubernetes secret. + # secretName is the name of the Kubernetes secret or Vault secret path that holds the snapshot agent config. secretName: null - # The key of the Kubernetes secret. + # secretKey is the key within the Kubernetes secret or Vault secret that holds the snapshot agent config. secretKey: null serviceAccount: @@ -2642,6 +2695,29 @@ apiGateway: # @type: string annotations: null + # Resource settings for api gateway pods. + # @recurse: false + # @type: map + resources: + requests: + memory: "100Mi" + cpu: "100m" + limits: + memory: "100Mi" + cpu: "100m" + + # Resource settings for the `copy-consul-bin` init container. + # @recurse: false + # @type: map + initCopyConsulContainer: + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "150Mi" + cpu: "50m" + # Configuration settings for the webhook-cert-manager # `webhook-cert-manager` ensures that cert bundles are up to date for the mutating webhook. webhookCertManager: diff --git a/control-plane/commands.go b/control-plane/commands.go index db43863642..8d5e8de23e 100644 --- a/control-plane/commands.go +++ b/control-plane/commands.go @@ -5,6 +5,7 @@ import ( cmdACLInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/acl-init" cmdConnectInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/connect-init" + cmdConsulLogout "github.com/hashicorp/consul-k8s/control-plane/subcommand/consul-logout" cmdConsulSidecar "github.com/hashicorp/consul-k8s/control-plane/subcommand/consul-sidecar" cmdController "github.com/hashicorp/consul-k8s/control-plane/subcommand/controller" cmdCreateFederationSecret "github.com/hashicorp/consul-k8s/control-plane/subcommand/create-federation-secret" @@ -46,6 +47,10 @@ func init() { return &cmdConsulSidecar.Command{UI: ui}, nil }, + "consul-logout": func() (cli.Command, error) { + return &cmdConsulLogout.Command{UI: ui}, nil + }, + "server-acl-init": func() (cli.Command, error) { return &cmdServerACLInit.Command{UI: ui}, nil }, diff --git a/control-plane/helper/test/test_util.go b/control-plane/helper/test/test_util.go index 365dc54363..142aa535c5 100644 --- a/control-plane/helper/test/test_util.go +++ b/control-plane/helper/test/test_util.go @@ -15,6 +15,10 @@ import ( "github.com/stretchr/testify/require" ) +const ( + componentAuthMethod = "consul-k8s-component-auth-method" +) + // GenerateServerCerts generates Consul CA // and a server certificate and saves them to temp files.
// It returns file names in this order: @@ -57,6 +61,79 @@ func GenerateServerCerts(t *testing.T) (string, string, string) { return caFile.Name(), certFile.Name(), certKeyFile.Name() } +// SetupK8sComponentAuthMethod creates a k8s auth method, sample "acl:write" ACL policy, Role and BindingRule +// that allows a client using serviceAccount's JWT token to call "consul login". +func SetupK8sComponentAuthMethod(t *testing.T, consulClient *api.Client, serviceAccountName, k8sComponentNS string) { + t.Helper() + // Start the mock k8s server. + k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("content-type", "application/json") + if r != nil && r.URL.Path == "/apis/authentication.k8s.io/v1/tokenreviews" && r.Method == "POST" { + w.Write([]byte(TokenReviewsResponse(serviceAccountName, k8sComponentNS))) + } + if r != nil && r.URL.Path == fmt.Sprintf("/api/v1/namespaces/%s/serviceaccounts/%s", k8sComponentNS, serviceAccountName) && + r.Method == "GET" { + w.Write([]byte(ServiceAccountGetResponse(serviceAccountName, k8sComponentNS))) + } + })) + t.Cleanup(k8sMockServer.Close) + + // Set up Component's auth method. + authMethodTmpl := api.ACLAuthMethod{ + Name: componentAuthMethod, + Type: "kubernetes", + Description: "Kubernetes Auth Method", + Config: map[string]interface{}{ + "Host": k8sMockServer.URL, + "CACert": serviceAccountCACert, + "ServiceAccountJWT": ServiceAccountJWTToken, + }, + } + // This API call will idempotently create the auth method (it won't fail if it already exists). + _, _, err := consulClient.ACL().AuthMethodCreate(&authMethodTmpl, nil) + require.NoError(t, err) + + rules := `acl = "write"` + policyName := fmt.Sprintf("%s-token", serviceAccountName) + policy := api.ACLPolicy{ + Name: policyName, + Description: fmt.Sprintf("%s Token Policy", policyName), + Rules: rules, + Datacenters: []string{"dc1"}, + } + _, _, err = consulClient.ACL().PolicyCreate(&policy, &api.WriteOptions{}) + require.NoError(t, err) + + // Create the ACL Role, it requires an ACLRolePolicyLink which contains a list + // of ACL policies that are allowed to be fetched by an associated ACLBindingRule. + ap := &api.ACLRolePolicyLink{ + Name: policyName, + } + apl := []*api.ACLRolePolicyLink{} + apl = append(apl, ap) + aclRoleName := fmt.Sprintf("%s-acl-role", serviceAccountName) + role := &api.ACLRole{ + Name: aclRoleName, + Description: fmt.Sprintf("ACL Role for %s", serviceAccountName), + Policies: apl, + } + _, _, err = consulClient.ACL().RoleCreate(role, &api.WriteOptions{}) + require.NoError(t, err) + + // Create the ACLBindingRule, this specifies that a user using the AuthMethod + // is able to request an ACL Token with associated ACLRole from above via BindName + // as long as its serviceaccount matches the Selector. + abr := api.ACLBindingRule{ + Description: fmt.Sprintf("Binding Rule for %s", serviceAccountName), + AuthMethod: componentAuthMethod, + Selector: fmt.Sprintf("serviceaccount.name==%q", serviceAccountName), + BindType: api.BindingRuleBindTypeRole, + BindName: aclRoleName, + } + _, _, err = consulClient.ACL().BindingRuleCreate(&abr, nil) + require.NoError(t, err) +} + // SetupK8sAuthMethod create a k8s auth method and a binding rule in Consul for the // given k8s service and namespace. 
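The `SetupK8sComponentAuthMethod` helper above wires together the four ACL objects that `consul login` needs: an auth method, a policy, a role, and a binding rule keyed on the service account name. A rough sketch of what a component then does at runtime against such an auth method is shown below; the JWT path, auth method name, and component name are placeholders rather than values taken from this patch.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// In Kubernetes this is the projected service account token.
	jwt, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
	if err != nil {
		log.Fatal(err)
	}

	// Login exchanges the JWT for a Consul ACL token through the auth method;
	// the binding rule selects the role (and therefore the policy) to attach.
	tok, _, err := client.ACL().Login(&api.ACLLoginParams{
		AuthMethod:  "consul-k8s-component-auth-method",
		BearerToken: string(jwt),
		Meta:        map[string]string{"component": "example"},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("logged in, accessor:", tok.AccessorID)

	// Logout with the minted token deletes it again, mirroring the consul-logout
	// subcommand added later in this patch.
	cfg := api.DefaultConfig()
	cfg.Token = tok.SecretID
	logoutClient, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := logoutClient.ACL().Logout(nil); err != nil {
		log.Fatal(err)
	}
}
```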
func SetupK8sAuthMethod(t *testing.T, consulClient *api.Client, serviceName, k8sServiceNS string) { @@ -72,11 +149,11 @@ func SetupK8sAuthMethodWithNamespaces(t *testing.T, consulClient *api.Client, se k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("content-type", "application/json") if r != nil && r.URL.Path == "/apis/authentication.k8s.io/v1/tokenreviews" && r.Method == "POST" { - w.Write([]byte(tokenReviewsResponse(serviceName, k8sServiceNS))) + w.Write([]byte(TokenReviewsResponse(serviceName, k8sServiceNS))) } if r != nil && r.URL.Path == fmt.Sprintf("/api/v1/namespaces/%s/serviceaccounts/%s", k8sServiceNS, serviceName) && r.Method == "GET" { - w.Write([]byte(serviceAccountGetResponse(serviceName, k8sServiceNS))) + w.Write([]byte(ServiceAccountGetResponse(serviceName, k8sServiceNS))) } })) t.Cleanup(k8sMockServer.Close) @@ -119,7 +196,7 @@ func SetupK8sAuthMethodWithNamespaces(t *testing.T, consulClient *api.Client, se require.NoError(t, err) } -func tokenReviewsResponse(name, ns string) string { +func TokenReviewsResponse(name, ns string) string { return fmt.Sprintf(`{ "kind": "TokenReview", "apiVersion": "authentication.k8s.io/v1", @@ -144,7 +221,7 @@ func tokenReviewsResponse(name, ns string) string { }`, ns, name, ns) } -func serviceAccountGetResponse(name, ns string) string { +func ServiceAccountGetResponse(name, ns string) string { return fmt.Sprintf(`{ "kind": "ServiceAccount", "apiVersion": "v1", diff --git a/control-plane/subcommand/acl-init/command.go b/control-plane/subcommand/acl-init/command.go index 6017137bbf..dea1f0d688 100644 --- a/control-plane/subcommand/acl-init/command.go +++ b/control-plane/subcommand/acl-init/command.go @@ -12,53 +12,105 @@ import ( "text/template" "time" + "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/subcommand" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-discover" + "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) +const ( + defaultBearerTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" + defaultTokenSinkFile = "/consul/login/acl-token" +) + type Command struct { UI cli.Ui - flags *flag.FlagSet - k8s *flags.K8SFlags - flagSecretName string - flagInitType string - flagNamespace string - flagACLDir string - flagTokenSinkFile string + flags *flag.FlagSet + k8s *flags.K8SFlags + http *flags.HTTPFlags + + flagSecretName string + flagInitType string + flagNamespace string + flagPrimaryDatacenter string + flagACLDir string + flagTokenSinkFile string + + flagACLAuthMethod string // Auth Method to use for ACLs. + flagLogLevel string + flagLogJSON bool + + bearerTokenFile string // Location of the bearer token. Default is defaultBearerTokenFile. + flagComponentName string // Name of the component to be used as metadata to ACL Login. 
+ + // Flags to configure Consul connection + flagServerAddresses []string + flagServerPort uint + flagConsulCACert string + flagUseHTTPS bool k8sClient kubernetes.Interface - once sync.Once - help string + once sync.Once + help string + logger hclog.Logger + providers map[string]discover.Provider - ctx context.Context + ctx context.Context + consulClient *api.Client } func (c *Command) init() { c.flags = flag.NewFlagSet("", flag.ContinueOnError) + c.flags.StringVar(&c.flagSecretName, "secret-name", "", "Name of secret to watch for an ACL token") c.flags.StringVar(&c.flagInitType, "init-type", "", "ACL init type. The only supported value is 'client'. If set to 'client' will write Consul client ACL config to an acl-config.json file in -acl-dir") - c.flags.StringVar(&c.flagNamespace, "k8s-namespace", "", - "Name of Kubernetes namespace where the servers are deployed") c.flags.StringVar(&c.flagACLDir, "acl-dir", "/consul/aclconfig", "Directory name of shared volume where client acl config file acl-config.json will be written if -init-type=client") c.flags.StringVar(&c.flagTokenSinkFile, "token-sink-file", "", "Optional filepath to write acl token") + // Flags related to using consul login to fetch the ACL token. + c.flags.StringVar(&c.flagNamespace, "k8s-namespace", "", "Name of Kubernetes namespace where the token Kubernetes secret is stored.") + c.flags.StringVar(&c.flagPrimaryDatacenter, "primary-datacenter", "", "Name of the primary datacenter when federation is enabled and the command is run in a secondary datacenter.") + c.flags.StringVar(&c.flagACLAuthMethod, "acl-auth-method", "", "Name of the auth method to login with.") + c.flags.StringVar(&c.flagComponentName, "component-name", "", + "Name of the component to pass to ACL Login as metadata.") + c.flags.Var((*flags.AppendSliceValue)(&c.flagServerAddresses), "server-address", + "The IP, DNS name or the cloud auto-join string of the Consul server(s). If providing IPs or DNS names, may be specified multiple times. "+ + "At least one value is required.") + c.flags.UintVar(&c.flagServerPort, "server-port", 8500, "The HTTP or HTTPS port of the Consul server. Defaults to 8500.") + c.flags.StringVar(&c.flagConsulCACert, "consul-ca-cert", "", + "Path to the PEM-encoded CA certificate of the Consul cluster.") + c.flags.BoolVar(&c.flagUseHTTPS, "use-https", false, + "Toggle for using HTTPS for all API calls to Consul.") + c.flags.StringVar(&c.flagLogLevel, "log-level", "info", + "Log verbosity level. Supported values (in order of detail) are \"trace\", "+ + "\"debug\", \"info\", \"warn\", and \"error\".") + c.flags.BoolVar(&c.flagLogJSON, "log-json", false, + "Enable or disable JSON output format for logging.") + c.k8s = &flags.K8SFlags{} + c.http = &flags.HTTPFlags{} flags.Merge(c.flags, c.k8s.Flags()) + flags.Merge(c.flags, c.http.Flags()) c.help = flags.Usage(help, c.flags) } func (c *Command) Run(args []string) int { + var err error c.once.Do(c.init) - if err := c.flags.Parse(args); err != nil { + if err = c.flags.Parse(args); err != nil { return 1 } if len(c.flags.Args()) > 0 { @@ -66,6 +118,20 @@ func (c *Command) Run(args []string) int { return 1 } + if c.bearerTokenFile == "" { + c.bearerTokenFile = defaultBearerTokenFile + } + // This allows us to utilize the default path of `/consul/login/acl-token` for the ACL token + // but only in the case of when we're using ACL.Login. If flagACLAuthMethod is not set and + // the tokenSinkFile is also unset it means we do not want to write an ACL token in the case + // of the client token. 
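The comment above describes how the bearer token path, the token sink path, and the namespace are defaulted before the command proceeds. Purely for readability, the same rules are restated below as a standalone helper; the function name is invented for illustration, and the real command applies these defaults inline in Run.

```go
package aclinit

// applyACLInitDefaults restates the defaulting rules described above; it is not
// part of the patch, only an illustration of the decision table.
func applyACLInitDefaults(bearerTokenFile, tokenSinkFile, aclAuthMethod, namespace string) (string, string, string) {
	if bearerTokenFile == "" {
		bearerTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" // defaultBearerTokenFile
	}
	// Only default the sink file when a consul login will actually happen; for the
	// plain client-token path, an empty sink file means "do not write a token".
	if tokenSinkFile == "" && aclAuthMethod != "" {
		tokenSinkFile = "/consul/login/acl-token" // defaultTokenSinkFile
	}
	if namespace == "" {
		namespace = "default" // corev1.NamespaceDefault
	}
	return bearerTokenFile, tokenSinkFile, namespace
}
```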
+ if c.flagTokenSinkFile == "" && c.flagACLAuthMethod != "" { + c.flagTokenSinkFile = defaultTokenSinkFile + } + if c.flagNamespace == "" { + c.flagNamespace = corev1.NamespaceDefault + } + if c.ctx == nil { c.ctx = context.Background() } @@ -84,19 +150,75 @@ func (c *Command) Run(args []string) int { } } - // Check if the client secret exists yet - // If not, wait until it does + // Set up logging. + if c.logger == nil { + c.logger, err = common.Logger(c.flagLogLevel, c.flagLogJSON) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + } + var secret string - for { - var err error - secret, err = c.getSecret(c.flagSecretName) + if c.flagACLAuthMethod != "" { + cfg := api.DefaultConfig() + c.http.MergeOntoConfig(cfg) + + if len(c.flagServerAddresses) > 0 { + serverAddresses, err := common.GetResolvedServerAddresses(c.flagServerAddresses, c.providers, c.logger) + if err != nil { + c.UI.Error(fmt.Sprintf("Unable to discover any Consul addresses from %q: %s", c.flagServerAddresses[0], err)) + return 1 + } + + scheme := "http" + if c.flagUseHTTPS { + scheme = "https" + } + + serverAddr := fmt.Sprintf("%s:%d", serverAddresses[0], c.flagServerPort) + cfg.Address = serverAddr + cfg.Scheme = scheme + } + + c.consulClient, err = consul.NewClient(cfg) if err != nil { - c.UI.Error(fmt.Sprintf("Error getting Kubernetes secret: %s", err)) + c.logger.Error("Unable to get client connection", "error", err) + return 1 + } + + loginParams := common.LoginParams{ + AuthMethod: c.flagACLAuthMethod, + Datacenter: c.flagPrimaryDatacenter, + BearerTokenFile: c.bearerTokenFile, + TokenSinkFile: c.flagTokenSinkFile, + Meta: map[string]string{ + "component": c.flagComponentName, + }, } - if err == nil { - break + secret, err = common.ConsulLogin(c.consulClient, loginParams, c.logger) + if err != nil { + c.logger.Error("Consul login failed", "error", err) + return 1 + } + c.logger.Info("Successfully read ACL token from the server") + } else { + // Use k8s secret to obtain token. + + // Check if the client secret exists yet + // If not, wait until it does. + for { + var err error + secret, err = c.getSecret(c.flagSecretName) + if err != nil { + c.logger.Error("Error getting Kubernetes secret: ", "error", err) + } + if err == nil { + c.logger.Info("Successfully read Kubernetes secret") + break + } + time.Sleep(1 * time.Second) } - time.Sleep(1 * time.Second) } if c.flagInitType == "client" { @@ -106,7 +228,7 @@ func (c *Command) Run(args []string) int { tpl := template.Must(template.New("root").Parse(strings.TrimSpace(clientACLConfigTpl))) err := tpl.Execute(&buf, secret) if err != nil { - c.UI.Error(fmt.Sprintf("Error creating template: %s", err)) + c.logger.Error("Error creating template", "error", err) return 1 } @@ -115,7 +237,7 @@ func (c *Command) Run(args []string) int { // to be readable by the consul user. err = ioutil.WriteFile(filepath.Join(c.flagACLDir, "acl-config.json"), buf.Bytes(), 0644) if err != nil { - c.UI.Error(fmt.Sprintf("Error writing config file: %s", err)) + c.logger.Error("Error writing config file", "error", err) return 1 } } @@ -125,7 +247,7 @@ func (c *Command) Run(args []string) int { // to have permissions to overwrite our file. 
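Earlier in this hunk the command renders `clientACLConfigTpl` with the raw secret string as the template data. The template itself is defined elsewhere in this file and is not shown in the diff; assuming it has roughly the shape below (the JSON keys are a guess, only the `{{ . }}` data reference is implied by `tpl.Execute(&buf, secret)`), the rendering step amounts to:

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"strings"
	"text/template"
)

// assumedClientACLConfigTpl is NOT the template from the patch; it is a guess at its
// general shape, used only to show how Execute consumes the token string.
const assumedClientACLConfigTpl = `
{
  "acl": {
    "enabled": true,
    "default_policy": "deny",
    "down_policy": "extend-cache",
    "tokens": {
      "agent": "{{ . }}"
    }
  }
}`

func main() {
	secret := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" // the ACL token fetched above

	var buf bytes.Buffer
	tpl := template.Must(template.New("root").Parse(strings.TrimSpace(assumedClientACLConfigTpl)))
	if err := tpl.Execute(&buf, secret); err != nil {
		log.Fatal(err)
	}
	// buf now holds the acl-config.json content that is written into -acl-dir.
	fmt.Println(buf.String())
}
```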
err := ioutil.WriteFile(c.flagTokenSinkFile, []byte(secret), 0600) if err != nil { - c.UI.Error(fmt.Sprintf("Error writing token to file %q: %s", c.flagTokenSinkFile, err)) + c.logger.Error("Error writing token to file", "file", c.flagTokenSinkFile, "error", err) return 1 } } diff --git a/control-plane/subcommand/acl-init/command_test.go b/control-plane/subcommand/acl-init/command_test.go index 0a3a7ab8bf..2f392d8155 100644 --- a/control-plane/subcommand/acl-init/command_test.go +++ b/control-plane/subcommand/acl-init/command_test.go @@ -1,13 +1,20 @@ package aclinit import ( + "bytes" "context" + "fmt" "io/ioutil" "os" "path/filepath" + "strings" "testing" + "text/template" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" @@ -15,6 +22,10 @@ import ( "k8s.io/client-go/kubernetes/fake" ) +const ( + componentAuthMethod = "consul-k8s-component-auth-method" +) + // Test that we write the secret data to a file. func TestRun_TokenSinkFile(t *testing.T) { t.Parallel() @@ -50,7 +61,6 @@ func TestRun_TokenSinkFile(t *testing.T) { k8sClient: k8s, } code := cmd.Run([]string{ - "-k8s-namespace", k8sNS, "-token-sink-file", sinkFile, "-secret-name", secretName, }) @@ -91,15 +101,11 @@ func TestRun_TokenSinkFileErr(t *testing.T) { k8sClient: k8s, } code := cmd.Run([]string{ - "-k8s-namespace", k8sNS, "-token-sink-file", "/this/filepath/does/not/exist", "-secret-name", secretName, }) require.Equal(1, code) - require.Contains(ui.ErrorWriter.String(), - `Error writing token to file "/this/filepath/does/not/exist": open /this/filepath/does/not/exist: no such file or directory`, - ) } // Test that if the command is run twice it succeeds. This test is the result @@ -142,7 +148,6 @@ func TestRun_TokenSinkFileTwice(t *testing.T) { // Run twice. for i := 0; i < 2; i++ { code := cmd.Run([]string{ - "-k8s-namespace", k8sNS, "-token-sink-file", sinkFile, "-secret-name", secretName, }) @@ -153,3 +158,192 @@ func TestRun_TokenSinkFileTwice(t *testing.T) { require.Equal(token, string(bytes), "exp: %s, got: %s", token, string(bytes)) } } + +// TestRun_PerformsConsulLogin executes the consul login path and validates the token +// is written to disk. +func TestRun_PerformsConsulLogin(t *testing.T) { + // This is the test file that we will write the token to so consul-logout can read it. + tokenFile := common.WriteTempFile(t, "") + bearerFile := common.WriteTempFile(t, test.ServiceAccountJWTToken) + + k8s := fake.NewSimpleClientset() + + // Start Consul server with ACLs enabled and default deny policy. + masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" + server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.ACL.Enabled = true + c.ACL.DefaultPolicy = "deny" + c.ACL.Tokens.InitialManagement = masterToken + }) + require.NoError(t, err) + defer server.Stop() + server.WaitForLeader(t) + cfg := &api.Config{ + Scheme: "http", + Address: server.HTTPAddr, + Token: masterToken, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + + // Set up the Component Auth Method, this pre-loads Consul with bindingrule, roles and an acl:write policy so we + // can issue an ACL.Login(). 
+ test.SetupK8sComponentAuthMethod(t, consulClient, "test-sa", "default") + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + k8sClient: k8s, + bearerTokenFile: bearerFile, + } + + code := cmd.Run([]string{ + "-token-sink-file", tokenFile, + "-acl-auth-method", componentAuthMethod, + "-component-name", "foo", + "-http-addr", fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address), + }) + require.Equal(t, 0, code, ui.ErrorWriter.String()) + // Validate the Token got written. + tokenBytes, err := ioutil.ReadFile(tokenFile) + require.NoError(t, err) + require.Equal(t, 36, len(tokenBytes)) + // Validate the Token and its Description. + tok, _, err := consulClient.ACL().TokenReadSelf(&api.QueryOptions{Token: string(tokenBytes)}) + require.NoError(t, err) + require.Equal(t, "token created via login: {\"component\":\"foo\"}", tok.Description) +} + +// TestRun_WithAclAuthMethodDefinedWritesConfigJsonWithTokenMatchingSinkFile +// executes the consul login path and validates the token is written to +// acl-config.json and matches the token written to sink file. +func TestRun_WithAclAuthMethodDefined_WritesConfigJson_WithTokenMatchingSinkFile(t *testing.T) { + tokenFile := common.WriteTempFile(t, "") + bearerFile := common.WriteTempFile(t, test.ServiceAccountJWTToken) + tmpDir, err := ioutil.TempDir("", "") + require.NoError(t, err) + t.Cleanup(func() { + os.Remove(tokenFile) + os.RemoveAll(tmpDir) + }) + + k8s := fake.NewSimpleClientset() + + // Start Consul server with ACLs enabled and default deny policy. + masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" + + server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.ACL.Enabled = true + c.ACL.DefaultPolicy = "deny" + c.ACL.Tokens.InitialManagement = masterToken + }) + require.NoError(t, err) + defer server.Stop() + server.WaitForLeader(t) + cfg := &api.Config{ + Scheme: "http", + Address: server.HTTPAddr, + Token: masterToken, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + + // Set up the Component Auth Method, this pre-loads Consul with bindingrule, + // roles and an acl:write policy so we can issue an ACL.Login(). + test.SetupK8sComponentAuthMethod(t, consulClient, "test-sa", "default") + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + k8sClient: k8s, + bearerTokenFile: bearerFile, + } + + code := cmd.Run([]string{ + "-token-sink-file", tokenFile, + "-acl-auth-method", componentAuthMethod, + "-component-name", "foo", + "-http-addr", fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address), + "-init-type", "client", + "-acl-dir", tmpDir, + }) + require.Equal(t, 0, code, ui.ErrorWriter.String()) + // Validate the ACL Config file got written. + aclConfigBytes, err := ioutil.ReadFile(fmt.Sprintf("%s/acl-config.json", tmpDir)) + require.NoError(t, err) + // Validate the Token Sink File got written. + sinkFileToken, err := ioutil.ReadFile(tokenFile) + require.NoError(t, err) + // Validate the Token Sink File Matches the ACL Cconfig Token by injecting + // the token secret into the template used by the ACL config file. 
+ var buf bytes.Buffer + tpl := template.Must(template.New("root").Parse(strings.TrimSpace(clientACLConfigTpl))) + err = tpl.Execute(&buf, string(sinkFileToken)) + require.NoError(t, err) + expectedAclConfig := buf.String() + + require.Equal(t, expectedAclConfig, string(aclConfigBytes)) +} + +// TestRun_WithAclAuthMethodDefinedWritesConfigJsonWithTokenMatchingSinkFile +// executes the k8s secret path and validates the token is written to +// acl-config.json and matches the token written to sink file. +func TestRun_WithoutAclAuthMethodDefined_WritesConfigJsonWithTokenMatchingSinkFile(t *testing.T) { + t.Parallel() + require := require.New(t) + tmpDir, err := ioutil.TempDir("", "") + require.NoError(err) + + t.Cleanup(func() { + os.RemoveAll(tmpDir) + }) + + // Set up k8s with the secret. + token := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + k8sNS := "default" + secretName := "secret-name" + k8s := fake.NewSimpleClientset() + _, err = k8s.CoreV1().Secrets(k8sNS).Create( + context.Background(), + &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Labels: map[string]string{common.CLILabelKey: common.CLILabelValue}, + }, + Data: map[string][]byte{ + "token": []byte(token), + }, + }, + metav1.CreateOptions{}) + + require.NoError(err) + + sinkFile := filepath.Join(tmpDir, "acl-token") + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + k8sClient: k8s, + } + code := cmd.Run([]string{ + "-token-sink-file", sinkFile, + "-secret-name", secretName, + "-init-type", "client", + "-acl-dir", tmpDir, + }) + // Validate the ACL Config file got written. + aclConfigBytes, err := ioutil.ReadFile(fmt.Sprintf("%s/acl-config.json", tmpDir)) + require.NoError(err) + // Validate the Token Sink File got written. + require.Equal(0, code, ui.ErrorWriter.String()) + sinkFileToken, err := ioutil.ReadFile(sinkFile) + require.NoError(err) + // Validate the Token Sink File Matches the ACL Cconfig Token by injecting + // the token secret into the template used by the ACL config file. + var buf bytes.Buffer + tpl := template.Must(template.New("root").Parse(strings.TrimSpace(clientACLConfigTpl))) + err = tpl.Execute(&buf, string(sinkFileToken)) + require.NoError(err) + expectedAclConfig := buf.String() + + require.Equal(expectedAclConfig, string(aclConfigBytes)) +} diff --git a/control-plane/subcommand/common/common.go b/control-plane/subcommand/common/common.go index bd60b5822c..a5da023fc3 100644 --- a/control-plane/subcommand/common/common.go +++ b/control-plane/subcommand/common/common.go @@ -7,7 +7,9 @@ import ( "os" "strconv" "strings" + "time" + "github.com/cenkalti/backoff" "github.com/go-logr/logr" godiscover "github.com/hashicorp/consul-k8s/control-plane/helper/go-discover" "github.com/hashicorp/consul/api" @@ -31,6 +33,12 @@ const ( // which secrets to delete on an uninstall. CLILabelKey = "managed-by" CLILabelValue = "consul-k8s" + + // The number of times to attempt ACL Login. + numLoginRetries = 100 + + raftReplicationTimeout = 2 * time.Second + tokenReadPollingInterval = 100 * time.Millisecond ) // Logger returns an hclog instance with log level set and JSON logging enabled/disabled, or an error if level is invalid. @@ -77,35 +85,117 @@ func ValidateUnprivilegedPort(flagName, flagValue string) error { return nil } +// LoginParams are parameters used to log in to consul. +type LoginParams struct { + // AuthMethod is the name of the auth method. + AuthMethod string + // Datacenter is the datacenter for the login request. + Datacenter string + // Namespace is the namespace for the login request. 
+ Namespace string + // BearerTokenFile is the file where the bearer token is stored. + BearerTokenFile string + // TokenSinkFile is the file where to write the token received from Consul. + TokenSinkFile string + // Meta is the metadata to set on the token. + Meta map[string]string + + // numRetries is only used in tests to make them run faster. + numRetries uint64 +} + // ConsulLogin issues an ACL().Login to Consul and writes out the token to tokenSinkFile. // The logic of this is taken from the `consul login` command. -func ConsulLogin(client *api.Client, bearerTokenFile, authMethodName, tokenSinkFile, namespace string, meta map[string]string) error { - if meta == nil { - return fmt.Errorf("invalid meta") - } - data, err := ioutil.ReadFile(bearerTokenFile) +func ConsulLogin(client *api.Client, params LoginParams, log hclog.Logger) (string, error) { + // Read the bearerTokenFile. + data, err := ioutil.ReadFile(params.BearerTokenFile) if err != nil { - return fmt.Errorf("unable to read bearerTokenFile: %v, err: %v", bearerTokenFile, err) + return "", fmt.Errorf("unable to read bearer token file: %v, err: %v", params.BearerTokenFile, err) } bearerToken := strings.TrimSpace(string(data)) if bearerToken == "" { - return fmt.Errorf("no bearer token found in %s", bearerTokenFile) + return "", fmt.Errorf("no bearer token found in %q", params.BearerTokenFile) } - // Do the login. - req := &api.ACLLoginParams{ - AuthMethod: authMethodName, - BearerToken: bearerToken, - Meta: meta, + + if params.numRetries == 0 { + params.numRetries = numLoginRetries } - tok, _, err := client.ACL().Login(req, &api.WriteOptions{Namespace: namespace}) + var token *api.ACLToken + err = backoff.Retry(func() error { + // Do the login. + req := &api.ACLLoginParams{ + AuthMethod: params.AuthMethod, + BearerToken: bearerToken, + Meta: params.Meta, + } + // The datacenter flag will either have the value of the primary datacenter or "". In case of the latter, + // the token will be created in the datacenter of the installation. In case a global token is required, + // the token will be created in the primary datacenter. + token, _, err = client.ACL().Login(req, &api.WriteOptions{Namespace: params.Namespace, Datacenter: params.Datacenter}) + if err != nil { + log.Error("unable to login", "error", err) + return fmt.Errorf("error logging in: %s", err) + } + if params.TokenSinkFile != "" { + // Write out the resultant token file. + // Must be 0644 because this is written by the consul-k8s user but needs + // to be readable by the consul user + if err = WriteFileWithPerms(params.TokenSinkFile, token.SecretID, 0644); err != nil { + return fmt.Errorf("error writing token to file sink: %v", err) + } + } + return err + }, backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), params.numRetries)) if err != nil { - return fmt.Errorf("error logging in: %s", err) + log.Error("Hit maximum retries for consul login", "error", err) + return "", err } - if err := WriteFileWithPerms(tokenSinkFile, tok.SecretID, 0444); err != nil { - return fmt.Errorf("error writing token to file sink: %v", err) + log.Info("Consul login complete") + + // A workaround to check that the ACL token is replicated to other Consul servers. + // + // A consul client may reach out to a follower instead of a leader to resolve the token for an API call + // with that token. This is because clients talk to servers in the stale consistency mode + // to decrease the load on the servers (see https://www.consul.io/docs/architecture/consensus#stale). 
+ // In that case, it's possible that the token isn't replicated + // to that server instance yet. The client will then get an "ACL not found" error + // and subsequently cache this not found response. Then on any API call with the token, + // we will keep hitting the same "ACL not found" error + // until the cache entry expires (determined by the `acl_token_ttl` which defaults to 30 seconds). + // This is not great because it will delay app start up time by 30 seconds in most cases + // (if you are running 3 servers, then the probability of ending up on a follower is close to 2/3). + // + // To help with that, we try to first read the token in the stale consistency mode until we + // get a successful response. This should not take more than 100ms because raft replication + // should in most cases take less than that (see https://www.consul.io/docs/install/performance#read-write-tuning) + // but we set the timeout to 2s to be sure. + // + // Note though that this workaround does not eliminate this problem completely. It's still possible + // for this call and the next call to reach different servers and those servers to have different + // states from each other. + // For example, this call can reach a leader and succeed, while the next call can go to a follower + // that is still behind the leader and get an "ACL not found" error. + // However, this is a pretty unlikely case because + // clients have sticky connections to a server, and those connections get rebalanced only every 2-3min. + // And so, this workaround should work in a vast majority of cases. + log.Info("Checking that the ACL token exists when reading it in the stale consistency mode") + // Use raft timeout and polling interval to determine the number of retries. + numTokenReadRetries := uint64(raftReplicationTimeout.Milliseconds() / tokenReadPollingInterval.Milliseconds()) + err = backoff.Retry(func() error { + _, _, err = client.ACL().TokenReadSelf(&api.QueryOptions{AllowStale: true, Token: token.SecretID}) + if err != nil { + log.Error("Unable to read ACL token; retrying", "err", err) + } + return err + }, backoff.WithMaxRetries(backoff.NewConstantBackOff(tokenReadPollingInterval), numTokenReadRetries)) + if err != nil { + log.Error("Unable to read ACL token from a Consul server; "+ + "please check that your server cluster is healthy", "err", err) + return "", err } - return nil + log.Info("Successfully read ACL token from the server") + return token.SecretID, nil } // WriteFileWithPerms will write payload as the contents of the outputFile and set permissions after writing the contents. This function is necessary since using ioutil.WriteFile() alone will create the new file with the requested permissions prior to actually writing the file, so you can't set read-only permissions. diff --git a/control-plane/subcommand/common/common_test.go b/control-plane/subcommand/common/common_test.go index 179d10a114..a58fbfe441 100644 --- a/control-plane/subcommand/common/common_test.go +++ b/control-plane/subcommand/common/common_test.go @@ -54,20 +54,112 @@ func TestValidateUnprivilegedPort(t *testing.T) { // TestConsulLogin ensures that our implementation of consul login hits `/v1/acl/login`. 
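Pulling the refactor above together: a caller now fills in a `LoginParams` struct and receives the secret ID directly, with the login retries and the stale-read replication check handled inside `ConsulLogin`. A sketch of a typical call site is shown below, ahead of the tests that exercise it; the auth method name and file paths are placeholders, not values from this patch.

```go
package main

import (
	"github.com/hashicorp/consul-k8s/control-plane/subcommand/common"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/go-hclog"
)

// loginExample is a sketch of a call site for the refactored common.ConsulLogin.
func loginExample() (*api.Client, error) {
	cfg := api.DefaultConfig()
	client, err := api.NewClient(cfg)
	if err != nil {
		return nil, err
	}

	params := common.LoginParams{
		AuthMethod:      "consul-k8s-component-auth-method", // placeholder name
		Datacenter:      "dc1",                              // primary DC for global tokens, or "" for the local DC
		BearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token",
		TokenSinkFile:   "/consul/login/acl-token",
		Meta:            map[string]string{"component": "example"},
	}

	// ConsulLogin retries the login, optionally writes the token to the sink file,
	// polls a stale read until the token has replicated, and returns the secret ID.
	secretID, err := common.ConsulLogin(client, params, hclog.Default())
	if err != nil {
		return nil, err
	}

	// Rebuild the client with the minted token before making further API calls.
	cfg.Token = secretID
	return api.NewClient(cfg)
}
```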
func TestConsulLogin(t *testing.T) { t.Parallel() - require := require.New(t) - counter := 0 bearerTokenFile := WriteTempFile(t, "foo") tokenFile := WriteTempFile(t, "") - client := startMockServer(t, &counter) - err := ConsulLogin(client, bearerTokenFile, testAuthMethod, tokenFile, "", testPodMeta) - require.NoError(err) - require.Equal(counter, 1) + // This is a common.Logger. + log, err := Logger("INFO", false) + require.NoError(t, err) + client := startMockServer(t) + params := LoginParams{ + AuthMethod: testAuthMethod, + Datacenter: "dc1", + BearerTokenFile: bearerTokenFile, + TokenSinkFile: tokenFile, + } + _, err = ConsulLogin(client, params, log) + require.NoError(t, err) // Validate that the token file was written to disk. data, err := ioutil.ReadFile(tokenFile) - require.NoError(err) - require.Equal(string(data), "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586") + require.NoError(t, err) + require.Equal(t, string(data), "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586") +} + +// TestConsulLogin_Retries tests we retry /v1/acl/login call if it fails. +func TestConsulLogin_Retries(t *testing.T) { + t.Parallel() + + numLoginCalls := 0 + bearerTokenFile := WriteTempFile(t, "foo") + tokenFile := WriteTempFile(t, "") + + // This is a common.Logger. + log, err := Logger("INFO", false) + require.NoError(t, err) + // Start the Consul server. + consulServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Record all the API calls made. + if r != nil && r.URL.Path == "/v1/acl/login" && r.Method == "POST" { + if numLoginCalls == 0 { + w.WriteHeader(500) + } else { + w.Write([]byte(testLoginResponse)) + } + numLoginCalls++ + } + if r != nil && r.URL.Path == "/v1/acl/token/self" && r.Method == "GET" { + w.Write([]byte(testLoginResponse)) + } + })) + t.Cleanup(consulServer.Close) + + serverURL, err := url.Parse(consulServer.URL) + require.NoError(t, err) + clientConfig := &api.Config{Address: serverURL.String()} + client, err := api.NewClient(clientConfig) + require.NoError(t, err) + params := LoginParams{ + AuthMethod: testAuthMethod, + Datacenter: "dc1", + BearerTokenFile: bearerTokenFile, + TokenSinkFile: tokenFile, + } + _, err = ConsulLogin(client, params, log) + require.NoError(t, err) + require.Equal(t, 2, numLoginCalls) + // Validate that the token file was written to disk. + data, err := ioutil.ReadFile(tokenFile) + require.NoError(t, err) + require.Equal(t, string(data), "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586") +} + +// TestConsulLogin_TokenNotReplicated tests that if we can't read the token in stale consistency mode +// we return an error. +func TestConsulLogin_TokenNotReplicated(t *testing.T) { + t.Parallel() + + bearerTokenFile := WriteTempFile(t, "foo") + tokenFile := WriteTempFile(t, "") + + // This is a common.Logger. + log, err := Logger("INFO", false) + require.NoError(t, err) + // Start the Consul server. + consulServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Record all the API calls made. 
+ if r != nil && r.URL.Path == "/v1/acl/login" && r.Method == "POST" { + w.Write([]byte(testLoginResponse)) + } + if r != nil && r.URL.Path == "/v1/acl/token/self" && r.Method == "GET" { + w.WriteHeader(500) + } + })) + t.Cleanup(consulServer.Close) + + serverURL, err := url.Parse(consulServer.URL) + require.NoError(t, err) + clientConfig := &api.Config{Address: serverURL.String()} + client, err := api.NewClient(clientConfig) + require.NoError(t, err) + params := LoginParams{ + AuthMethod: testAuthMethod, + Datacenter: "dc1", + BearerTokenFile: bearerTokenFile, + TokenSinkFile: tokenFile, + } + _, err = ConsulLogin(client, params, log) + require.EqualError(t, err, "Unexpected response code: 500 ()") } func TestConsulLogin_EmptyBearerTokenFile(t *testing.T) { @@ -75,48 +167,41 @@ func TestConsulLogin_EmptyBearerTokenFile(t *testing.T) { require := require.New(t) bearerTokenFile := WriteTempFile(t, "") - err := ConsulLogin( - nil, - bearerTokenFile, - testAuthMethod, - "", - "", - testPodMeta, - ) - require.EqualError(err, fmt.Sprintf("no bearer token found in %s", bearerTokenFile)) + params := LoginParams{ + BearerTokenFile: bearerTokenFile, + } + _, err := ConsulLogin(nil, params, hclog.NewNullLogger()) + require.EqualError(err, fmt.Sprintf("no bearer token found in %q", bearerTokenFile)) } func TestConsulLogin_BearerTokenFileDoesNotExist(t *testing.T) { t.Parallel() require := require.New(t) randFileName := fmt.Sprintf("/foo/%d/%d", rand.Int(), rand.Int()) - err := ConsulLogin( - nil, - randFileName, - testAuthMethod, - "", - "", - testPodMeta, - ) + params := LoginParams{ + BearerTokenFile: randFileName, + } + _, err := ConsulLogin(nil, params, hclog.NewNullLogger()) require.Error(err) - require.Contains(err.Error(), "unable to read bearerTokenFile") + require.Contains(err.Error(), "unable to read bearer token file") } func TestConsulLogin_TokenFileUnwritable(t *testing.T) { t.Parallel() require := require.New(t) - counter := 0 bearerTokenFile := WriteTempFile(t, "foo") - client := startMockServer(t, &counter) + client := startMockServer(t) + // This is a common.Logger. + log, err := Logger("INFO", false) + require.NoError(err) randFileName := fmt.Sprintf("/foo/%d/%d", rand.Int(), rand.Int()) - err := ConsulLogin( - client, - bearerTokenFile, - testAuthMethod, - randFileName, - "", - testPodMeta, - ) + params := LoginParams{ + AuthMethod: testAuthMethod, + BearerTokenFile: bearerTokenFile, + TokenSinkFile: randFileName, + numRetries: 2, + } + _, err = ConsulLogin(client, params, log) require.Error(err) require.Contains(err.Error(), "error writing token to file sink") } @@ -214,15 +299,16 @@ func TestGetResolvedServerAddresses(t *testing.T) { // startMockServer starts an httptest server used to mock a Consul server's // /v1/acl/login endpoint. apiCallCounter will be incremented on each call to /v1/acl/login. // It returns a consul client pointing at the server. -func startMockServer(t *testing.T, apiCallCounter *int) *api.Client { - +func startMockServer(t *testing.T) *api.Client { // Start the Consul server. consulServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Record all the API calls made. 
if r != nil && r.URL.Path == "/v1/acl/login" && r.Method == "POST" { - *apiCallCounter++ + w.Write([]byte(testLoginResponse)) + } + if r != nil && r.URL.Path == "/v1/acl/token/self" && r.Method == "GET" { + w.Write([]byte(testLoginResponse)) } - w.Write([]byte(testLoginResponse)) })) t.Cleanup(consulServer.Close) @@ -258,5 +344,3 @@ const testLoginResponse = `{ "CreateIndex": 36, "ModifyIndex": 36 }` - -var testPodMeta = map[string]string{"pod": "default/podName"} diff --git a/control-plane/subcommand/connect-init/command.go b/control-plane/subcommand/connect-init/command.go index 01a23e9c2c..a2b1b29148 100644 --- a/control-plane/subcommand/connect-init/command.go +++ b/control-plane/subcommand/connect-init/command.go @@ -22,13 +22,8 @@ const ( defaultTokenSinkFile = "/consul/connect-inject/acl-token" defaultProxyIDFile = "/consul/connect-inject/proxyid" - // The number of times to attempt ACL Login. - numLoginRetries = 3 // The number of times to attempt to read this service (120s). defaultServicePollingRetries = 120 - - raftReplicationTimeout = 2 * time.Second - tokenReadPollingInterval = 100 * time.Millisecond ) type Command struct { @@ -84,7 +79,6 @@ func (c *Command) init() { c.http = &flags.HTTPFlags{} flags.Merge(c.flagSet, c.http.Flags()) c.help = flags.Usage(help, c.flagSet) - } func (c *Command) Run(args []string) int { @@ -129,13 +123,14 @@ func (c *Command) Run(args []string) int { if c.flagACLAuthMethod != "" { // loginMeta is the default metadata that we pass to the consul login API. loginMeta := map[string]string{"pod": fmt.Sprintf("%s/%s", c.flagPodNamespace, c.flagPodName)} - err = backoff.Retry(func() error { - err := common.ConsulLogin(consulClient, c.flagBearerTokenFile, c.flagACLAuthMethod, c.flagACLTokenSink, c.flagAuthMethodNamespace, loginMeta) - if err != nil { - c.logger.Error("Consul login failed; retrying", "error", err) - } - return err - }, backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), numLoginRetries)) + loginParams := common.LoginParams{ + AuthMethod: c.flagACLAuthMethod, + Namespace: c.flagAuthMethodNamespace, + BearerTokenFile: c.flagBearerTokenFile, + TokenSinkFile: c.flagACLTokenSink, + Meta: loginMeta, + } + token, err := common.ConsulLogin(consulClient, loginParams, c.logger) if err != nil { if c.flagServiceAccountName == "default" { c.logger.Warn("The service account name for this Pod is \"default\"." + @@ -143,60 +138,10 @@ func (c *Command) Run(args []string) int { " The service account name must match the name of the Kubernetes Service" + " or the consul.hashicorp.com/connect-service annotation.") } - c.logger.Error("Hit maximum retries for consul login", "error", err) + c.logger.Error("unable to complete login", "error", err) return 1 } - // Now update the client so that it will read the ACL token we just fetched. - cfg.TokenFile = c.flagACLTokenSink - consulClient, err = consul.NewClient(cfg) - if err != nil { - c.logger.Error("Unable to update client connection", "error", err) - return 1 - } - c.logger.Info("Consul login complete") - - // A workaround to check that the ACL token is replicated to other Consul servers. - // - // A consul client may reach out to a follower instead of a leader to resolve the token during the - // call to get services below. This is because clients talk to servers in the stale consistency mode - // to decrease the load on the servers (see https://www.consul.io/docs/architecture/consensus#stale). - // In that case, it's possible that the token isn't replicated - // to that server instance yet. 
The client will then get an "ACL not found" error - // and subsequently cache this not found response. Then our call below - // to get services from the agent will keep hitting the same "ACL not found" error - // until the cache entry expires (determined by the `acl_token_ttl` which defaults to 30 seconds). - // This is not great because it will delay app start up time by 30 seconds in most cases - // (if you are running 3 servers, then the probability of ending up on a follower is close to 2/3). - // - // To help with that, we try to first read the token in the stale consistency mode until we - // get a successful response. This should not take more than 100ms because raft replication - // should in most cases take less than that (see https://www.consul.io/docs/install/performance#read-write-tuning) - // but we set the timeout to 2s to be sure. - // - // Note though that this workaround does not eliminate this problem completely. It's still possible - // for this call and the next call to reach different servers and those servers to have different - // states from each other. - // For example, this call can reach a leader and succeed, while the call below can go to a follower - // that is still behind the leader and get an "ACL not found" error. - // However, this is a pretty unlikely case because - // clients have sticky connections to a server, and those connections get rebalanced only every 2-3min. - // And so, this workaround should work in a vast majority of cases. - c.logger.Info("Checking that the ACL token exists when reading it in the stale consistency mode") - // Use raft timeout and polling interval to determine the number of retries. - numTokenReadRetries := uint64(raftReplicationTimeout.Milliseconds() / tokenReadPollingInterval.Milliseconds()) - err = backoff.Retry(func() error { - _, _, err := consulClient.ACL().TokenReadSelf(&api.QueryOptions{AllowStale: true}) - if err != nil { - c.logger.Error("Unable to read ACL token; retrying", "err", err) - } - return err - }, backoff.WithMaxRetries(backoff.NewConstantBackOff(tokenReadPollingInterval), numTokenReadRetries)) - if err != nil { - c.logger.Error("Unable to read ACL token from a Consul server; "+ - "please check that your server cluster is healthy", "err", err) - return 1 - } - c.logger.Info("Successfully read ACL token from the server") + cfg.Token = token } // Now wait for the service to be registered. Do this by querying the Agent for a service @@ -204,6 +149,13 @@ func (c *Command) Run(args []string) int { var proxyID string registrationRetryCount := 0 var errServiceNameMismatch error + // We need a new client so that we can use the ACL token that was fetched during login to do the next bit, + // otherwise `consulClient` will still be using the bearerToken that was passed in. 
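The comment above is the crux of this hunk: the removed `cfg.TokenFile = c.flagACLTokenSink` approach pointed the client at the token sink file on disk, while the new flow sets the in-memory secret returned by the login. A tiny sketch of the two `api.Config` fields involved, using an invented helper name:

```go
package main

import "github.com/hashicorp/consul/api"

// newClientWithLoginToken illustrates the switch made in this hunk: instead of
// pointing the client at the token sink file on disk (api.Config.TokenFile, the
// removed approach), the secret ID returned by common.ConsulLogin is set directly.
func newClientWithLoginToken(cfg *api.Config, secretID string) (*api.Client, error) {
	cfg.Token = secretID // previously: cfg.TokenFile = c.flagACLTokenSink
	return api.NewClient(cfg)
}
```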
+	consulClient, err = consul.NewClient(cfg)
+	if err != nil {
+		c.logger.Error("Unable to update client connection", "error", err)
+		return 1
+	}
 	err = backoff.Retry(func() error {
 		registrationRetryCount++
 		filter := fmt.Sprintf("Meta[%q] == %q and Meta[%q] == %q ",
diff --git a/control-plane/subcommand/consul-logout/command.go b/control-plane/subcommand/consul-logout/command.go
new file mode 100644
index 0000000000..74ec6ccf57
--- /dev/null
+++ b/control-plane/subcommand/consul-logout/command.go
@@ -0,0 +1,99 @@
+package consullogout
+
+import (
+	"flag"
+	"sync"
+
+	"github.com/hashicorp/consul-k8s/control-plane/consul"
+	"github.com/hashicorp/consul-k8s/control-plane/subcommand/common"
+	"github.com/hashicorp/consul-k8s/control-plane/subcommand/flags"
+	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/go-hclog"
+	"github.com/mitchellh/cli"
+)
+
+const (
+	defaultACLTokenFile = "/consul/login/acl-token"
+)
+
+// Command issues a Consul logout API request to delete the ACL token for this pod.
+type Command struct {
+	UI cli.Ui
+
+	flagLogLevel string
+	flagLogJSON  bool
+
+	flagSet *flag.FlagSet
+	http    *flags.HTTPFlags
+
+	once   sync.Once
+	help   string
+	logger hclog.Logger
+}
+
+func (c *Command) init() {
+	c.flagSet = flag.NewFlagSet("", flag.ContinueOnError)
+	c.flagSet.StringVar(&c.flagLogLevel, "log-level", "info",
+		"Log verbosity level. Supported values (in order of detail) are \"trace\", "+
+			"\"debug\", \"info\", \"warn\", and \"error\".")
+	c.flagSet.BoolVar(&c.flagLogJSON, "log-json", false,
+		"Enable or disable JSON output format for logging.")
+
+	c.http = &flags.HTTPFlags{}
+	flags.Merge(c.flagSet, c.http.Flags())
+	c.help = flags.Usage(help, c.flagSet)
+
+}
+
+func (c *Command) Run(args []string) int {
+	var err error
+	c.once.Do(c.init)
+
+	if err := c.flagSet.Parse(args); err != nil {
+		return 1
+	}
+	if c.logger == nil {
+		c.logger, err = common.Logger(c.flagLogLevel, c.flagLogJSON)
+		if err != nil {
+			c.UI.Error(err.Error())
+			return 1
+		}
+	}
+	// Set a default if it is not already set.
+	if c.http.TokenFile() == "" {
+		if err := c.http.SetTokenFile(defaultACLTokenFile); err != nil {
+			c.logger.Error("Unable to update client", "error", err)
+			return 1
+		}
+	}
+
+	cfg := api.DefaultConfig()
+	c.http.MergeOntoConfig(cfg)
+	consulClient, err := consul.NewClient(cfg)
+	if err != nil {
+		c.logger.Error("Unable to get client connection", "error", err)
+		return 1
+	}
+	// Issue the logout.
+	_, err = consulClient.ACL().Logout(&api.WriteOptions{})
+	if err != nil {
+		c.logger.Error("Unable to delete consul ACL token as logout failed", "error", err)
+		return 1
+	}
+	c.logger.Info("ACL token successfully deleted")
+	return 0
+}
+
+func (c *Command) Synopsis() string { return synopsis }
+func (c *Command) Help() string {
+	c.once.Do(c.init)
+	return c.help
+}
+
+const synopsis = "Issue a consul logout to delete the ACL token."
+const help = `
+Usage: consul-k8s-control-plane consul-logout [options]
+
+  Deletes the ACL token for this pod.
+  Not intended for stand-alone use.
+`
diff --git a/control-plane/subcommand/consul-logout/command_test.go b/control-plane/subcommand/consul-logout/command_test.go
new file mode 100644
index 0000000000..5ff4c216be
--- /dev/null
+++ b/control-plane/subcommand/consul-logout/command_test.go
@@ -0,0 +1,153 @@
+package consullogout
+
+import (
+	"fmt"
+	"math/rand"
+	"os"
+	"testing"
+
+	"github.com/hashicorp/consul-k8s/control-plane/consul"
+	"github.com/hashicorp/consul-k8s/control-plane/helper/test"
+	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/sdk/testutil"
+	"github.com/mitchellh/cli"
+	"github.com/stretchr/testify/require"
+)
+
+// TestRun_InvalidSinkFile validates that we correctly fail in case the token sink file
+// does not exist.
+func TestRun_InvalidSinkFile(t *testing.T) {
+	t.Parallel()
+	randFileName := fmt.Sprintf("/foo/%d/%d", rand.Int(), rand.Int())
+
+	ui := cli.NewMockUi()
+	cmd := Command{
+		UI: ui,
+	}
+	code := cmd.Run([]string{
+		"-token-file", randFileName,
+	})
+	require.Equal(t, 1, code)
+}
+
+// Test_UnableToLogoutDueToInvalidToken checks the error path for when Consul is not
+// aware of an ACL token. This is an edge case, but it covers the rare occurrence where
+// the preStop hook that runs `consul-logout` is executed more than once by the kubelet.
+// It also covers cases where the acl-token file has been corrupted somehow.
+func Test_UnableToLogoutDueToInvalidToken(t *testing.T) {
+	tokenFile := fmt.Sprintf("/tmp/%d1", rand.Int())
+	t.Cleanup(func() {
+		os.Remove(tokenFile)
+	})
+
+	var caFile, certFile, keyFile string
+	// Start Consul server with ACLs enabled and default deny policy.
+	masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586"
+	server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
+		c.ACL.Enabled = true
+		c.ACL.DefaultPolicy = "deny"
+		c.ACL.Tokens.InitialManagement = masterToken
+		caFile, certFile, keyFile = test.GenerateServerCerts(t)
+		c.CAFile = caFile
+		c.CertFile = certFile
+		c.KeyFile = keyFile
+	})
+	require.NoError(t, err)
+	defer server.Stop()
+	server.WaitForLeader(t)
+	cfg := &api.Config{
+		Address: server.HTTPSAddr,
+		Scheme:  "https",
+		Token:   masterToken,
+		TLSConfig: api.TLSConfig{
+			CAFile: caFile,
+		},
+	}
+	require.NoError(t, err)
+
+	bogusToken := "00000000-00-0-001110aacddbderf"
+	err = os.WriteFile(tokenFile, []byte(bogusToken), 0444)
+	require.NoError(t, err)
+
+	ui := cli.NewMockUi()
+	cmd := Command{
+		UI: ui,
+	}
+
+	// Run the command.
+	code := cmd.Run([]string{
+		"-http-addr", fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address),
+		"-token-file", tokenFile,
+	})
+	require.Equal(t, 1, code, ui.ErrorWriter.String())
+	require.Contains(t, "Unexpected response code: 403 (ACL not found)", ui.ErrorWriter.String())
+}
+
+// Test_RunUsingLogin creates an AuthMethod and issues an ACL Token via ACL().Login(),
+// which is the code path that is taken to provision the ACL tokens at runtime through
+// subcommand/acl-init. It then runs `consul-logout` and ensures that the ACL token
+// is properly destroyed.
+func Test_RunUsingLogin(t *testing.T) {
+	// This is the test file that we will write the token to so consul-logout can read it.
+	tokenFile := fmt.Sprintf("/tmp/%d1", rand.Int())
+	t.Cleanup(func() {
+		os.Remove(tokenFile)
+	})
+
+	// Start Consul server with ACLs enabled and default deny policy.
+ masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" + server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.ACL.Enabled = true + c.ACL.DefaultPolicy = "deny" + c.ACL.Tokens.InitialManagement = masterToken + }) + require.NoError(t, err) + defer server.Stop() + server.WaitForLeader(t) + cfg := &api.Config{ + Address: server.HTTPAddr, + Scheme: "http", + Token: masterToken, + } + consulClient, err := consul.NewClient(cfg) + require.NoError(t, err) + + // We are not setting up the Component Auth Method here because testing logout + // does not need to use the auth method and this auth method can still issue a login. + test.SetupK8sAuthMethod(t, consulClient, "test-sa", "default") + + // Do the login. + req := &api.ACLLoginParams{ + AuthMethod: test.AuthMethod, + BearerToken: test.ServiceAccountJWTToken, + Meta: map[string]string{}, + } + token, _, err := consulClient.ACL().Login(req, &api.WriteOptions{}) + require.NoError(t, err) + + // Validate that the token was created. + tok, _, err := consulClient.ACL().TokenRead(token.AccessorID, &api.QueryOptions{}) + require.NoError(t, err) + + // Write the token's SecretID to the tokenFile which mimics loading + // the ACL token from subcommand/acl-init path. + err = os.WriteFile(tokenFile, []byte(token.SecretID), 0444) + require.NoError(t, err) + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + // Run the command. + code := cmd.Run([]string{ + "-http-addr", fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address), + "-token-file", tokenFile, + }) + require.Equal(t, 0, code, ui.ErrorWriter.String()) + + // Validate the ACL token was destroyed. + noTok, _, err := consulClient.ACL().TokenReadSelf(&api.QueryOptions{Token: tok.SecretID}) + require.Error(t, err) + require.Nil(t, noTok) +} diff --git a/control-plane/subcommand/partition-init/command.go b/control-plane/subcommand/partition-init/command.go index 8846d42efc..6ef0471fe2 100644 --- a/control-plane/subcommand/partition-init/command.go +++ b/control-plane/subcommand/partition-init/command.go @@ -28,11 +28,9 @@ type Command struct { flagPartitionName string // Flags to configure Consul connection - flagServerAddresses []string - flagServerPort uint - flagConsulCACert string - flagConsulTLSServerName string - flagUseHTTPS bool + flagServerAddresses []string + flagServerPort uint + flagUseHTTPS bool flagLogLevel string flagLogJSON bool @@ -60,10 +58,6 @@ func (c *Command) init() { "The IP, DNS name or the cloud auto-join string of the Consul server(s). If providing IPs or DNS names, may be specified multiple times. "+ "At least one value is required.") c.flags.UintVar(&c.flagServerPort, "server-port", 8500, "The HTTP or HTTPS port of the Consul server. Defaults to 8500.") - c.flags.StringVar(&c.flagConsulCACert, "consul-ca-cert", "", - "Path to the PEM-encoded CA certificate of the Consul cluster.") - c.flags.StringVar(&c.flagConsulTLSServerName, "consul-tls-server-name", "", - "The server name to set as the SNI header when sending HTTPS requests to Consul.") c.flags.BoolVar(&c.flagUseHTTPS, "use-https", false, "Toggle for using HTTPS for all API calls to Consul.") c.flags.DurationVar(&c.flagTimeout, "timeout", 10*time.Minute, @@ -134,14 +128,11 @@ func (c *Command) Run(args []string) int { } // For all of the next operations we'll need a Consul client. 
serverAddr := fmt.Sprintf("%s:%d", serverAddresses[0], c.flagServerPort) - consulClient, err := consul.NewClient(&api.Config{ - Address: serverAddr, - Scheme: scheme, - TLSConfig: api.TLSConfig{ - Address: c.flagConsulTLSServerName, - CAFile: c.flagConsulCACert, - }, - }) + cfg := api.DefaultConfig() + cfg.Address = serverAddr + cfg.Scheme = scheme + c.http.MergeOntoConfig(cfg) + consulClient, err := consul.NewClient(cfg) if err != nil { c.UI.Error(fmt.Sprintf("Error creating Consul client for addr %q: %s", serverAddr, err)) return 1 diff --git a/control-plane/subcommand/server-acl-init/command.go b/control-plane/subcommand/server-acl-init/command.go index 64906d44ad..495ec7beda 100644 --- a/control-plane/subcommand/server-acl-init/command.go +++ b/control-plane/subcommand/server-acl-init/command.go @@ -11,6 +11,11 @@ import ( "sync" "time" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/subcommand" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + k8sflags "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" "github.com/hashicorp/consul/api" "github.com/hashicorp/go-discover" "github.com/hashicorp/go-hclog" @@ -19,12 +24,6 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/subcommand" - "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" - "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" - k8sflags "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" ) type Command struct { @@ -38,26 +37,28 @@ type Command struct { flagAllowDNS bool - flagCreateClientToken bool + flagSetServerTokens bool - flagCreateSyncToken bool + flagClient bool + + flagSyncCatalog bool flagSyncConsulNodeName string - flagCreateInjectToken bool - flagInjectAuthMethodHost string - flagBindingRuleSelector string + flagConnectInject bool + flagAuthMethodHost string + flagBindingRuleSelector string - flagCreateControllerToken bool + flagController bool flagCreateEntLicenseToken bool - flagCreateSnapshotAgentToken bool + flagSnapshotAgent bool - flagCreateMeshGatewayToken bool + flagMeshGateway bool flagIngressGatewayNames []string flagTerminatingGatewayNames []string - flagCreateAPIGatewayToken bool + flagAPIGatewayController bool // Flags to configure Consul connection. flagServerAddresses []string @@ -71,8 +72,9 @@ type Command struct { flagACLReplicationTokenFile string // Flags to support partitions. - flagEnablePartitions bool // true if Admin Partitions are enabled - flagPartitionName string // name of the Admin Partition + flagEnablePartitions bool // true if Admin Partitions are enabled + flagPartitionName string // name of the Admin Partition + flagPartitionTokenFile string // Flags to support namespaces. 
flagEnableNamespaces bool // Use namespacing on all components @@ -117,45 +119,36 @@ func (c *Command) init() { c.flags.StringVar(&c.flagK8sNamespace, "k8s-namespace", "", "Name of Kubernetes namespace where Consul and consul-k8s components are deployed.") + c.flags.BoolVar(&c.flagSetServerTokens, "set-server-tokens", true, "Toggle for setting agent tokens for the servers.") + c.flags.BoolVar(&c.flagAllowDNS, "allow-dns", false, "Toggle for updating the anonymous token to allow DNS queries to work") - c.flags.BoolVar(&c.flagCreateClientToken, "create-client-token", true, + c.flags.BoolVar(&c.flagClient, "client", true, "Toggle for creating a client agent token. Default is true.") - c.flags.BoolVar(&c.flagCreateSyncToken, "create-sync-token", false, - "Toggle for creating a catalog sync token.") + c.flags.BoolVar(&c.flagSyncCatalog, "sync-catalog", false, + "Toggle for configuring ACL login for sync catalog.") c.flags.StringVar(&c.flagSyncConsulNodeName, "sync-consul-node-name", "k8s-sync", "The Consul node name to register for catalog sync. Defaults to k8s-sync. To be discoverable "+ "via DNS, the name should only contain alpha-numerics and dashes.") - // Previously when this flag was set, -enable-namespaces and -create-inject-auth-method - // were always passed, so now we just look at those flags and ignore - // this flag. We keep the flag here though so there's no error if it's - // passed. - var unused bool - c.flags.BoolVar(&unused, "create-inject-namespace-token", false, - "Toggle for creating a connect injector token. Only required when namespaces are enabled. "+ - "Deprecated: set -enable-namespaces and -create-inject-token instead.") - - c.flags.BoolVar(&c.flagCreateInjectToken, "create-inject-auth-method", false, - "Toggle for creating a connect inject auth method. 
Deprecated: use -create-inject-token instead.") - c.flags.BoolVar(&c.flagCreateInjectToken, "create-inject-token", false, - "Toggle for creating a connect inject auth method and an ACL token.") - c.flags.StringVar(&c.flagInjectAuthMethodHost, "inject-auth-method-host", "", + c.flags.BoolVar(&c.flagConnectInject, "connect-inject", false, + "Toggle for configuring ACL login for Connect inject.") + c.flags.StringVar(&c.flagAuthMethodHost, "auth-method-host", "", "Kubernetes Host config parameter for the auth method."+ "If not provided, the default cluster Kubernetes service will be used.") c.flags.StringVar(&c.flagBindingRuleSelector, "acl-binding-rule-selector", "", "Selector string for connectInject ACL Binding Rule.") - c.flags.BoolVar(&c.flagCreateControllerToken, "create-controller-token", false, - "Toggle for creating a token for the controller.") + c.flags.BoolVar(&c.flagController, "controller", false, + "Toggle for configuring ACL login for the controller.") c.flags.BoolVar(&c.flagCreateEntLicenseToken, "create-enterprise-license-token", false, "Toggle for creating a token for the enterprise license job.") - c.flags.BoolVar(&c.flagCreateSnapshotAgentToken, "create-snapshot-agent-token", false, - "[Enterprise Only] Toggle for creating a token for the Consul snapshot agent deployment.") - c.flags.BoolVar(&c.flagCreateMeshGatewayToken, "create-mesh-gateway-token", false, - "Toggle for creating a token for a Connect mesh gateway.") + c.flags.BoolVar(&c.flagSnapshotAgent, "snapshot-agent", false, + "[Enterprise Only] Toggle for configuring ACL login for the snapshot agent.") + c.flags.BoolVar(&c.flagMeshGateway, "mesh-gateway", false, + "Toggle for configuring ACL login for the mesh gateway.") c.flags.Var((*flags.AppendSliceValue)(&c.flagIngressGatewayNames), "ingress-gateway-name", "Name of an ingress gateway that needs an acl token. May be specified multiple times. "+ "[Enterprise Only] If using Consul namespaces and registering the gateway outside of the "+ @@ -164,8 +157,8 @@ func (c *Command) init() { "Name of a terminating gateway that needs an acl token. May be specified multiple times. "+ "[Enterprise Only] If using Consul namespaces and registering the gateway outside of the "+ "default namespace, specify the value in the form ..") - c.flags.BoolVar(&c.flagCreateAPIGatewayToken, "create-api-gateway-token", false, - "Toggle for creating a token for the API Gateway controller integration.") + c.flags.BoolVar(&c.flagAPIGatewayController, "api-gateway-controller", false, + "Toggle for configuring ACL login for the API gateway controller.") c.flags.Var((*flags.AppendSliceValue)(&c.flagServerAddresses), "server-address", "The IP, DNS name or the cloud auto-join string of the Consul server(s). If providing IPs or DNS names, may be specified multiple times. 
"+ @@ -182,6 +175,8 @@ func (c *Command) init() { "[Enterprise Only] Enables Admin Partitions") c.flags.StringVar(&c.flagPartitionName, "partition", "", "[Enterprise Only] Name of the Admin Partition") + c.flags.StringVar(&c.flagPartitionTokenFile, "partition-token-file", "", + "[Enterprise Only] Path to file containing ACL token to be used in non-default partitions.") c.flags.BoolVar(&c.flagEnableNamespaces, "enable-namespaces", false, "[Enterprise Only] Enables namespaces, in either a single Consul namespace or mirrored [Enterprise only feature]") @@ -257,35 +252,34 @@ func (c *Command) Run(args []string) int { c.UI.Error(err.Error()) return 1 } - var aclReplicationToken string if c.flagACLReplicationTokenFile != "" { - // Load the ACL replication token from file. - tokenBytes, err := ioutil.ReadFile(c.flagACLReplicationTokenFile) + var err error + aclReplicationToken, err = loadTokenFromFile(c.flagACLReplicationTokenFile) if err != nil { - c.UI.Error(fmt.Sprintf("Unable to read ACL replication token from file %q: %s", c.flagACLReplicationTokenFile, err)) + c.UI.Error(err.Error()) return 1 } - if len(tokenBytes) == 0 { - c.UI.Error(fmt.Sprintf("ACL replication token file %q is empty", c.flagACLReplicationTokenFile)) + } + + var partitionToken string + if c.flagPartitionTokenFile != "" { + var err error + partitionToken, err = loadTokenFromFile(c.flagPartitionTokenFile) + if err != nil { + c.UI.Error(err.Error()) return 1 } - aclReplicationToken = strings.TrimSpace(string(tokenBytes)) } var providedBootstrapToken string if c.flagBootstrapTokenFile != "" { - // Load the bootstrap token from file. - tokenBytes, err := ioutil.ReadFile(c.flagBootstrapTokenFile) + var err error + providedBootstrapToken, err = loadTokenFromFile(c.flagBootstrapTokenFile) if err != nil { - c.UI.Error(fmt.Sprintf("Unable to read bootstrap token from file %q: %s", c.flagBootstrapTokenFile, err)) - return 1 - } - if len(tokenBytes) == 0 { - c.UI.Error(fmt.Sprintf("Bootstrap token file %q is empty", c.flagBootstrapTokenFile)) + c.UI.Error(err.Error()) return 1 } - providedBootstrapToken = strings.TrimSpace(string(tokenBytes)) } var cancel context.CancelFunc @@ -320,12 +314,7 @@ func (c *Command) Run(args []string) int { var bootstrapToken string - if c.flagBootstrapTokenFile != "" { - // If bootstrap token is provided, we skip server bootstrapping and use - // the provided token to create policies and tokens for the rest of the components. - c.log.Info("Bootstrap token is provided so skipping Consul server ACL bootstrapping") - bootstrapToken = providedBootstrapToken - } else if c.flagACLReplicationTokenFile != "" && !c.flagCreateACLReplicationToken { + if c.flagACLReplicationTokenFile != "" && !c.flagCreateACLReplicationToken { // If ACL replication is enabled, we don't need to ACL bootstrap the servers // since they will be performing replication. // We can use the replication token as our bootstrap token because it @@ -334,19 +323,19 @@ func (c *Command) Run(args []string) int { bootstrapToken = aclReplicationToken } else { // Check if we've already been bootstrapped. 
- var err error - bootTokenSecretName := c.withPrefix("bootstrap-acl-token") - bootstrapToken, err = c.getBootstrapToken(bootTokenSecretName) - if err != nil { - c.log.Error(fmt.Sprintf("Unexpected error looking for preexisting bootstrap Secret: %s", err)) - return 1 - } - - if bootstrapToken != "" { - c.log.Info(fmt.Sprintf("ACLs already bootstrapped - retrieved bootstrap token from Secret %q", bootTokenSecretName)) + var bootTokenSecretName string + if providedBootstrapToken != "" { + c.log.Info("Using provided bootstrap token") + bootstrapToken = providedBootstrapToken } else { - c.log.Info("No bootstrap token from previous installation found, continuing on to bootstrapping") + bootTokenSecretName = c.withPrefix("bootstrap-acl-token") + bootstrapToken, err = c.getBootstrapToken(bootTokenSecretName) + if err != nil { + c.log.Error(fmt.Sprintf("Unexpected error looking for preexisting bootstrap Secret: %s", err)) + return 1 + } } + bootstrapToken, err = c.bootstrapServers(serverAddresses, bootstrapToken, bootTokenSecretName, scheme) if err != nil { c.log.Error(err.Error()) @@ -365,10 +354,10 @@ func (c *Command) Run(args []string) int { CAFile: c.flagConsulCACert, }, } + if c.flagEnablePartitions { clientConfig.Partition = c.flagPartitionName } - consulClient, err := consul.NewClient(clientConfig) if err != nil { c.log.Error(fmt.Sprintf("Error creating Consul client for addr %q: %s", serverAddr, err)) @@ -380,11 +369,15 @@ func (c *Command) Run(args []string) int { return 1 } c.log.Info("Current datacenter", "datacenter", consulDC, "primaryDC", primaryDC) - isPrimary := consulDC == primaryDC + primary := consulDC == primaryDC - if c.flagEnablePartitions && c.flagPartitionName == consulDefaultPartition && isPrimary { + if c.flagEnablePartitions && c.flagPartitionName == consulDefaultPartition && primary { // Partition token is local because only the Primary datacenter can have Admin Partitions. - err := c.createLocalACL("partitions", partitionRules, consulDC, isPrimary, consulClient) + if c.flagPartitionTokenFile != "" { + err = c.createACLWithSecretID("partitions", partitionRules, consulDC, primary, consulClient, partitionToken, true) + } else { + err = c.createLocalACL("partitions", partitionRules, consulDC, primary, consulClient) + } if err != nil { c.log.Error(err.Error()) return 1 @@ -440,21 +433,40 @@ func (c *Command) Run(args []string) int { } } - if c.flagCreateClientToken { + // Create the component auth method, this is the auth method that Consul components will use + // to issue an `ACL().Login()` against at startup, for local tokens. 
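The comment above describes the login that each Consul component performs at startup against the new component auth method. As a rough, self-contained sketch of that flow from the component's side: the token path is the standard in-pod service account location, and the auth method name follows the resource-prefix convention used in the tests; both are illustrative, not values defined by this patch.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// The projected service account JWT that the Kubernetes auth method
	// validates via the TokenReview API.
	bearer, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
	if err != nil {
		log.Fatal(err)
	}

	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Auth method name follows the withPrefix("k8s-component-auth-method")
	// convention; the release prefix here is illustrative.
	token, _, err := client.ACL().Login(&api.ACLLoginParams{
		AuthMethod:  "release-name-consul-k8s-component-auth-method",
		BearerToken: string(bearer),
		Meta:        map[string]string{"component": "sync-catalog"},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("minted ACL token with accessor:", token.AccessorID)
}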
+ localComponentAuthMethodName := c.withPrefix("k8s-component-auth-method") + err = c.configureLocalComponentAuthMethod(consulClient, localComponentAuthMethodName) + if err != nil { + c.log.Error(err.Error()) + return 1 + } + + globalComponentAuthMethodName := fmt.Sprintf("%s-%s", localComponentAuthMethodName, consulDC) + if !primary && c.flagAuthMethodHost != "" { + err = c.configureGlobalComponentAuthMethod(consulClient, globalComponentAuthMethodName, primaryDC) + if err != nil { + c.log.Error(err.Error()) + return 1 + } + } + + if c.flagClient { agentRules, err := c.agentRules() if err != nil { c.log.Error("Error templating client agent rules", "err", err) return 1 } - err = c.createLocalACL("client", agentRules, consulDC, isPrimary, consulClient) + serviceAccountName := c.withPrefix("client") + err = c.createACLPolicyRoleAndBindingRule("client", agentRules, consulDC, primaryDC, false, primary, localComponentAuthMethodName, serviceAccountName, consulClient) if err != nil { c.log.Error(err.Error()) return 1 } } - if c.createAnonymousPolicy(isPrimary) { + if c.createAnonymousPolicy(primary) { // When the default partition is in a VM, the anonymous policy does not allow cross-partition // DNS lookups. The anonymous policy in the default partition needs to be updated in order to // support this use-case. Creating a separate anonymous token client that updates the anonymous @@ -476,19 +488,27 @@ func (c *Command) Run(args []string) int { } } - if c.flagCreateSyncToken { + if c.flagSyncCatalog { syncRules, err := c.syncRules() if err != nil { c.log.Error("Error templating sync rules", "err", err) return 1 } - // If namespaces are enabled, the policy and token needs to be global - // to be allowed to create namespaces. + serviceAccountName := c.withPrefix("sync-catalog") + componentAuthMethodName := localComponentAuthMethodName + + // If namespaces are enabled, the policy and token need to be global to be allowed to create namespaces. if c.flagEnableNamespaces { - err = c.createGlobalACL("catalog-sync", syncRules, consulDC, isPrimary, consulClient) + // Create the catalog sync ACL Policy, Role and BindingRule. + // SyncCatalog token must be global when namespaces are enabled. This means secondary datacenters need + // a token that is known by the primary datacenters. + if !primary { + componentAuthMethodName = globalComponentAuthMethodName + } + err = c.createACLPolicyRoleAndBindingRule("sync-catalog", syncRules, consulDC, primaryDC, globalPolicy, primary, componentAuthMethodName, serviceAccountName, consulClient) } else { - err = c.createLocalACL("catalog-sync", syncRules, consulDC, isPrimary, consulClient) + err = c.createACLPolicyRoleAndBindingRule("sync-catalog", syncRules, consulDC, primaryDC, localPolicy, primary, componentAuthMethodName, serviceAccountName, consulClient) } if err != nil { c.log.Error(err.Error()) @@ -496,8 +516,9 @@ func (c *Command) Run(args []string) int { } } - if c.flagCreateInjectToken { - err := c.configureConnectInjectAuthMethod(consulClient) + if c.flagConnectInject { + connectAuthMethodName := c.withPrefix("k8s-auth-method") + err := c.configureConnectInjectAuthMethod(consulClient, connectAuthMethodName) if err != nil { c.log.Error(err.Error()) return 1 @@ -510,14 +531,22 @@ func (c *Command) Run(args []string) int { return 1 } + serviceAccountName := c.withPrefix("connect-injector") + componentAuthMethodName := localComponentAuthMethodName + // If namespaces are enabled, the policy and token need to be global // to be allowed to create namespaces. 
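The new createACLPolicyRoleAndBindingRule helper used above replaces the per-component token creation; its implementation is not part of this hunk, but the Consul API objects such a helper manages look roughly like the sketch below. Names follow the conventions visible in the tests (resource prefix, "-policy"/"-acl-role" suffixes), and the rules string and selector are placeholders.

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Policy with the component's rules; Datacenters is set for local policies
	// and omitted for global ones.
	policy, _, err := client.ACL().PolicyCreate(&api.ACLPolicy{
		Name:        "client-policy",
		Rules:       `node_prefix "" { policy = "write" }`, // placeholder rules
		Datacenters: []string{"dc1"},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Role that links the policy.
	role, _, err := client.ACL().RoleCreate(&api.ACLRole{
		Name:     "release-name-consul-client-acl-role",
		Policies: []*api.ACLRolePolicyLink{{ID: policy.ID}},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Binding rule that maps logins from the component's service account,
	// via the component auth method, onto that role.
	_, _, err = client.ACL().BindingRuleCreate(&api.ACLBindingRule{
		AuthMethod: "release-name-consul-k8s-component-auth-method",
		BindType:   api.BindingRuleBindTypeRole,
		BindName:   role.Name,
		Selector:   `serviceaccount.name=="release-name-consul-client"`,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
}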
if c.flagEnableNamespaces { - err = c.createGlobalACL("connect-inject", injectRules, consulDC, isPrimary, consulClient) + // Create the connect-inject ACL Policy, Role and BindingRule but do not issue any ACLTokens or create Kube Secrets. + // ConnectInjector token must be global when namespaces are enabled. This means secondary datacenters need + // a token that is known by the primary datacenters. + if !primary { + componentAuthMethodName = globalComponentAuthMethodName + } + err = c.createACLPolicyRoleAndBindingRule("connect-inject", injectRules, consulDC, primaryDC, globalPolicy, primary, componentAuthMethodName, serviceAccountName, consulClient) } else { - err = c.createLocalACL("connect-inject", injectRules, consulDC, isPrimary, consulClient) + err = c.createACLPolicyRoleAndBindingRule("connect-inject", injectRules, consulDC, primaryDC, localPolicy, primary, componentAuthMethodName, serviceAccountName, consulClient) } - if err != nil { c.log.Error(err.Error()) return 1 @@ -527,9 +556,9 @@ func (c *Command) Run(args []string) int { if c.flagCreateEntLicenseToken { var err error if c.flagEnablePartitions { - err = c.createLocalACL("enterprise-license", entPartitionLicenseRules, consulDC, isPrimary, consulClient) + err = c.createLocalACL("enterprise-license", entPartitionLicenseRules, consulDC, primary, consulClient) } else { - err = c.createLocalACL("enterprise-license", entLicenseRules, consulDC, isPrimary, consulClient) + err = c.createLocalACL("enterprise-license", entLicenseRules, consulDC, primary, consulClient) } if err != nil { c.log.Error(err.Error()) @@ -537,37 +566,42 @@ func (c *Command) Run(args []string) int { } } - if c.flagCreateSnapshotAgentToken { - err := c.createLocalACL("client-snapshot-agent", snapshotAgentRules, consulDC, isPrimary, consulClient) - if err != nil { + if c.flagSnapshotAgent { + serviceAccountName := c.withPrefix("snapshot-agent") + if err := c.createACLPolicyRoleAndBindingRule("snapshot-agent", snapshotAgentRules, consulDC, primaryDC, localPolicy, primary, localComponentAuthMethodName, serviceAccountName, consulClient); err != nil { c.log.Error(err.Error()) return 1 } } - if c.flagCreateAPIGatewayToken { - apigwRules, err := c.apiGatewayControllerRules() + if c.flagAPIGatewayController { + rules, err := c.apiGatewayControllerRules() if err != nil { c.log.Error("Error templating api gateway rules", "err", err) return 1 } - err = c.createLocalACL("api-gateway-controller", apigwRules, consulDC, isPrimary, consulClient) - if err != nil { + serviceAccountName := c.withPrefix("api-gateway-controller") + if err := c.createACLPolicyRoleAndBindingRule("api-gateway-controller", rules, consulDC, primaryDC, localPolicy, primary, localComponentAuthMethodName, serviceAccountName, consulClient); err != nil { c.log.Error(err.Error()) return 1 } } - if c.flagCreateMeshGatewayToken { - meshGatewayRules, err := c.meshGatewayRules() + if c.flagMeshGateway { + rules, err := c.meshGatewayRules() if err != nil { - c.log.Error("Error templating dns rules", "err", err) + c.log.Error("Error templating mesh gateway rules", "err", err) return 1 } + serviceAccountName := c.withPrefix("mesh-gateway") // Mesh gateways require a global policy/token because they must // discover services in other datacenters. 
- err = c.createGlobalACL("mesh-gateway", meshGatewayRules, consulDC, isPrimary, consulClient) + authMethodName := localComponentAuthMethodName + if !primary { + authMethodName = globalComponentAuthMethodName + } + err = c.createACLPolicyRoleAndBindingRule("mesh-gateway", rules, consulDC, primaryDC, globalPolicy, primary, authMethodName, serviceAccountName, consulClient) if err != nil { c.log.Error(err.Error()) return 1 @@ -575,114 +609,36 @@ func (c *Command) Run(args []string) int { } if len(c.flagIngressGatewayNames) > 0 { - // Create a token for each ingress gateway name. Each gateway needs a - // separate token because users may need to attach different policies - // to each gateway token depending on what the services it represents - for _, name := range c.flagIngressGatewayNames { - if name == "" { - c.log.Error("Ingress gateway names cannot be empty") - return 1 - } - - // Parse optional namespace, erroring if a user - // provides a namespace when not enabling namespaces. - var namespace string - if c.flagEnableNamespaces { - parts := strings.SplitN(strings.TrimSpace(name), ".", 2) - if len(parts) > 1 { - // Name and namespace were provided - name = parts[0] - - // Use default namespace if provided flag is of the - // form "name." - if parts[1] != "" { - namespace = parts[1] - } else { - namespace = consulDefaultNamespace - } - } else { - // Use the default Consul namespace - namespace = consulDefaultNamespace - } - } else if strings.ContainsAny(name, ".") { - c.log.Error("Gateway names shouldn't include a namespace if Consul namespaces aren't enabled", - "gateway-name", name) - return 1 - } - - // Define the gateway rules - ingressGatewayRules, err := c.ingressGatewayRules(name, namespace) - if err != nil { - c.log.Error("Error templating ingress gateway rules", "gateway-name", name, - "namespace", namespace, "err", err) - return 1 - } - - // The names in the Helm chart are specified by users and so may not contain - // the words "ingress-gateway". We need to create unique names for tokens - // across all gateway types and so must suffix with `-ingress-gateway`. - tokenName := fmt.Sprintf("%s-ingress-gateway", name) - err = c.createLocalACL(tokenName, ingressGatewayRules, consulDC, isPrimary, consulClient) - if err != nil { - c.log.Error(err.Error()) - return 1 - } + params := ConfigureGatewayParams{ + GatewayType: "ingress", + GatewayNames: c.flagIngressGatewayNames, + AuthMethodName: localComponentAuthMethodName, + RulesGenerator: c.ingressGatewayRules, + ConsulDC: consulDC, + PrimaryDC: primaryDC, + Primary: primary, + } + err := c.configureGateway(params, consulClient) + if err != nil { + c.log.Error(err.Error()) + return 1 } } if len(c.flagTerminatingGatewayNames) > 0 { - // Create a token for each terminating gateway name. Each gateway needs a - // separate token because users may need to attach different policies - // to each gateway token depending on what the services it represents - for _, name := range c.flagTerminatingGatewayNames { - if name == "" { - c.log.Error("Terminating gateway names cannot be empty") - return 1 - } - - // Parse optional namespace. This does not protect against a user - // that provides a namespace with namespaces not enabled. - var namespace string - if c.flagEnableNamespaces { - parts := strings.SplitN(strings.TrimSpace(name), ".", 2) - if len(parts) > 1 { - // Name and namespace were provided - name = parts[0] - - // Use default namespace if provided flag is of the - // form "name." 
- if parts[1] != "" { - namespace = parts[1] - } else { - namespace = consulDefaultNamespace - } - } else { - // Use the default Consul namespace - namespace = consulDefaultNamespace - } - } else if strings.ContainsAny(name, ".") { - c.log.Error("Gateway names shouldn't include a namespace if Consul namespaces aren't enabled", - "gateway-name", name) - return 1 - } - - // Define the gateway rules - terminatingGatewayRules, err := c.terminatingGatewayRules(name, namespace) - if err != nil { - c.log.Error("Error templating terminating gateway rules", "gateway-name", name, - "namespace", namespace, "err", err) - return 1 - } - - // The names in the Helm chart are specified by users and so may not contain - // the words "ingress-gateway". We need to create unique names for tokens - // across all gateway types and so must suffix with `-terminating-gateway`. - tokenName := fmt.Sprintf("%s-terminating-gateway", name) - err = c.createLocalACL(tokenName, terminatingGatewayRules, consulDC, isPrimary, consulClient) - if err != nil { - c.log.Error(err.Error()) - return 1 - } + params := ConfigureGatewayParams{ + GatewayType: "terminating", + GatewayNames: c.flagTerminatingGatewayNames, + AuthMethodName: localComponentAuthMethodName, + RulesGenerator: c.terminatingGatewayRules, + ConsulDC: consulDC, + PrimaryDC: primaryDC, + Primary: primary, + } + err := c.configureGateway(params, consulClient) + if err != nil { + c.log.Error(err.Error()) + return 1 } } @@ -695,9 +651,9 @@ func (c *Command) Run(args []string) int { // Policy must be global because it replicates from the primary DC // and so the primary DC needs to be able to accept the token. if aclReplicationToken != "" { - err = c.createGlobalACLWithSecretID(common.ACLReplicationTokenName, rules, consulDC, isPrimary, consulClient, aclReplicationToken) + err = c.createACLWithSecretID(common.ACLReplicationTokenName, rules, consulDC, primary, consulClient, aclReplicationToken, false) } else { - err = c.createGlobalACL(common.ACLReplicationTokenName, rules, consulDC, isPrimary, consulClient) + err = c.createGlobalACL(common.ACLReplicationTokenName, rules, consulDC, primary, consulClient) } if err != nil { c.log.Error(err.Error()) @@ -705,26 +661,154 @@ func (c *Command) Run(args []string) int { } } - if c.flagCreateControllerToken { + if c.flagController { rules, err := c.controllerRules() if err != nil { c.log.Error("Error templating controller token rules", "err", err) return 1 } + + serviceAccountName := c.withPrefix("controller") + + // Create the controller ACL Policy, Role and BindingRule but do not issue any ACLTokens or create Kube Secrets. // Controller token must be global because config entry writes all // go to the primary datacenter. This means secondary datacenters need // a token that is known by the primary datacenters. - err = c.createGlobalACL("controller", rules, consulDC, isPrimary, consulClient) + authMethodName := localComponentAuthMethodName + if !primary { + authMethodName = globalComponentAuthMethodName + } + err = c.createACLPolicyRoleAndBindingRule("controller", rules, consulDC, primaryDC, globalPolicy, primary, authMethodName, serviceAccountName, consulClient) if err != nil { c.log.Error(err.Error()) return 1 } } - c.log.Info("server-acl-init completed successfully") return 0 } +// configureGlobalComponentAuthMethod sets up an AuthMethod in the primary datacenter, +// that the Consul components will use to issue global ACL tokens with. 
+func (c *Command) configureGlobalComponentAuthMethod(consulClient *api.Client, authMethodName, primaryDC string) error {
+	// Create the auth method template. This requires calls to the kubernetes environment.
+	authMethod, err := c.createAuthMethodTmpl(authMethodName, false)
+	if err != nil {
+		return err
+	}
+	authMethod.TokenLocality = "global"
+	writeOptions := &api.WriteOptions{Datacenter: primaryDC}
+	return c.createAuthMethod(consulClient, &authMethod, writeOptions)
+}
+
+// configureLocalComponentAuthMethod sets up an AuthMethod in the same datacenter,
+// that the Consul components will use to issue local ACL tokens with.
+func (c *Command) configureLocalComponentAuthMethod(consulClient *api.Client, authMethodName string) error {
+	// Create the auth method template. This requires calls to the kubernetes environment.
+	authMethod, err := c.createAuthMethodTmpl(authMethodName, false)
+	if err != nil {
+		return err
+	}
+	return c.createAuthMethod(consulClient, &authMethod, &api.WriteOptions{})
+}
+
+// createAuthMethod creates the desired AuthMethod.
+func (c *Command) createAuthMethod(consulClient *api.Client, authMethod *api.ACLAuthMethod, writeOptions *api.WriteOptions) error {
+	return c.untilSucceeds(fmt.Sprintf("creating auth method %s", authMethod.Name),
+		func() error {
+			var err error
+			// `AuthMethodCreate` will also be able to update an existing
+			// AuthMethod based on the name provided. This means that any
+			// configuration changes will correctly update the AuthMethod.
+			_, _, err = consulClient.ACL().AuthMethodCreate(authMethod, writeOptions)
+			return err
+		})
+}
+
+type gatewayRulesGenerator func(name, namespace string) (string, error)
+
+// ConfigureGatewayParams are parameters used to configure Ingress and Terminating Gateways.
+type ConfigureGatewayParams struct {
+	// GatewayType specifies whether it is an ingress or terminating gateway.
+	GatewayType string
+	// GatewayNames is the collection of gateways that have been specified.
+	GatewayNames []string
+	// AuthMethodName is the auth method for which to register the binding rules and policies for the gateways.
+	AuthMethodName string
+	// RulesGenerator is the function that supplies the rules that will be added to the policy.
+	RulesGenerator gatewayRulesGenerator
+	// ConsulDC is the name of the DC where the gateways will be registered.
+	ConsulDC string
+	// PrimaryDC is the name of the Primary Data Center.
+	PrimaryDC string
+	// Primary specifies whether the ConsulDC is the Primary Data Center.
+	Primary bool
+}
+
+func (c *Command) configureGateway(gatewayParams ConfigureGatewayParams, consulClient *api.Client) error {
+	// Each gateway needs to be configured
+	// separately because users may need to attach different policies
+	// to each gateway role depending on what services it represents.
+	for _, name := range gatewayParams.GatewayNames {
+		if name == "" {
+			errMessage := fmt.Sprintf("%s gateway name cannot be empty",
+				strings.Title(strings.ToLower(gatewayParams.GatewayType)))
+			c.log.Error(errMessage)
+			return errors.New(errMessage)
+		}
+
+		// Parse optional namespace, erroring if a user
+		// provides a namespace when not enabling namespaces.
+		var namespace string
+		if c.flagEnableNamespaces {
+			parts := strings.SplitN(strings.TrimSpace(name), ".", 2)
+			if len(parts) > 1 {
+				// Name and namespace were provided
+				name = parts[0]
+
+				// Use default namespace if provided flag is of the
+				// form "name."
+ if parts[1] != "" { + namespace = parts[1] + } else { + namespace = consulDefaultNamespace + } + } else { + // Use the default Consul namespace + namespace = consulDefaultNamespace + } + } else if strings.ContainsAny(name, ".") { + errMessage := "gateway names shouldn't include a namespace if Consul namespaces aren't enabled" + c.log.Error(errMessage, "gateway-name", name) + return errors.New(errMessage) + } + + // Define the gateway rules + rules, err := gatewayParams.RulesGenerator(name, namespace) + if err != nil { + + errMessage := fmt.Sprintf("error templating %s gateway rules", + gatewayParams.GatewayType) + c.log.Error(errMessage, "gateway-name", name, + "namespace", namespace, "err", err) + return errors.New(errMessage) + } + + // The names in the Helm chart are specified by users and so may not contain + // the words "ingress-gateway" or "terminating-gateway". We need to create unique names for tokens + // across all gateway types and so must suffix with either `-ingress-gateway` of `-terminating-gateway`. + serviceAccountName := c.withPrefix(name) + err = c.createACLPolicyRoleAndBindingRule(serviceAccountName, rules, + gatewayParams.ConsulDC, gatewayParams.PrimaryDC, localPolicy, + gatewayParams.Primary, gatewayParams.AuthMethodName, serviceAccountName, consulClient) + if err != nil { + c.log.Error(err.Error()) + return err + } + } + return nil +} + // getBootstrapToken returns the existing bootstrap token if there is one by // reading the Kubernetes Secret with name secretName. // If there is no bootstrap token yet, then it returns an empty string (not an error). @@ -844,7 +928,7 @@ func (c *Command) createAnonymousPolicy(isPrimary bool) bool { // on cross-dc API calls. The cross-dc API calls thus use the anonymous // token. Cross-dc API calls are needed by the Connect proxies to talk // cross-dc. - (c.flagCreateInjectToken && c.flagFederation)) + (c.flagConnectInject && c.flagFederation)) } func (c *Command) validateFlags() error { @@ -886,10 +970,25 @@ func (c *Command) validateFlags() error { return nil } -const consulDefaultNamespace = "default" -const consulDefaultPartition = "default" -const synopsis = "Initialize ACLs on Consul servers and other components." -const help = ` +func loadTokenFromFile(tokenFile string) (string, error) { + // Load the bootstrap token from file. + tokenBytes, err := ioutil.ReadFile(tokenFile) + if err != nil { + return "", fmt.Errorf("unable to read token from file %q: %s", tokenFile, err) + } + if len(tokenBytes) == 0 { + return "", fmt.Errorf("token file %q is empty", tokenFile) + } + return strings.TrimSpace(string(tokenBytes)), nil +} + +const ( + consulDefaultNamespace = "default" + consulDefaultPartition = "default" + globalPolicy = true + localPolicy = false + synopsis = "Initialize ACLs on Consul servers and other components." + help = ` Usage: consul-k8s-control-plane server-acl-init [options] Bootstraps servers with ACLs and creates policies and ACL tokens for other @@ -898,3 +997,4 @@ Usage: consul-k8s-control-plane server-acl-init [options] and safe to run multiple times. 
` +) diff --git a/control-plane/subcommand/server-acl-init/command_ent_test.go b/control-plane/subcommand/server-acl-init/command_ent_test.go index 5824d4af9b..10e3650b12 100644 --- a/control-plane/subcommand/server-acl-init/command_ent_test.go +++ b/control-plane/subcommand/server-acl-init/command_ent_test.go @@ -4,15 +4,23 @@ package serveraclinit import ( "context" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" "strings" "testing" "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" ) @@ -24,7 +32,7 @@ func TestRun_ConnectInject_SingleDestinationNamespace(t *testing.T) { consulDestNamespaces := []string{"default", "destination"} for _, consulDestNamespace := range consulDestNamespaces { t.Run(consulDestNamespace, func(tt *testing.T) { - k8s, testAgent := completeEnterpriseSetup(tt) + k8s, testAgent := completeSetup(tt) defer testAgent.Stop() setUpK8sServiceAccount(tt, k8s, ns) require := require.New(tt) @@ -40,7 +48,7 @@ func TestRun_ConnectInject_SingleDestinationNamespace(t *testing.T) { "-server-port=" + strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-create-inject-token", + "-connect-inject", "-enable-partitions", "-partition=default", "-enable-namespaces", @@ -64,7 +72,13 @@ func TestRun_ConnectInject_SingleDestinationNamespace(t *testing.T) { } methods, _, err := consul.ACL().AuthMethodList(namespaceQuery) require.NoError(err) - require.Len(methods, 1) + if consulDestNamespace == "default" { + // If the destination mamespace is default then AuthMethodList + // will return the component-auth-method as well. + require.Len(methods, 2) + } else { + require.Len(methods, 1) + } // Check the ACL auth method is created in the expected namespace. 
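The adjusted assertion above reflects that the shared component auth method is always created in the default Consul namespace, so a namespace-scoped listing there now returns two entries (the connect-inject auth method plus the component auth method), while other destination namespaces still return one. A minimal sketch of that namespace-scoped listing, with the server address and namespace assumed:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Listing auth methods in the "default" namespace; in other namespaces
	// only the connect-inject auth method is expected.
	methods, _, err := client.ACL().AuthMethodList(&api.QueryOptions{Namespace: "default"})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range methods {
		fmt.Println(m.Name, m.Type)
	}
}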
authMethodName := resourcePrefix + "-k8s-auth-method" @@ -146,7 +160,7 @@ func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { for name, c := range cases { t.Run(name, func(tt *testing.T) { - k8s, testAgent := completeEnterpriseSetup(tt) + k8s, testAgent := completeSetup(tt) defer testAgent.Stop() setUpK8sServiceAccount(tt, k8s, ns) require := require.New(tt) @@ -162,7 +176,7 @@ func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { "-server-port=" + strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-create-inject-token", + "-connect-inject", "-enable-partitions", "-partition=default", "-enable-namespaces", @@ -263,7 +277,7 @@ func TestRun_ACLPolicyUpdates(t *testing.T) { k8sNamespaceFlags := []string{"default", "other"} for _, k8sNamespaceFlag := range k8sNamespaceFlags { t.Run(k8sNamespaceFlag, func(t *testing.T) { - k8s, testAgent := completeEnterpriseSetup(t) + k8s, testAgent := completeSetup(t) setUpK8sServiceAccount(t, k8s, k8sNamespaceFlag) defer testAgent.Stop() require := require.New(t) @@ -274,18 +288,18 @@ func TestRun_ACLPolicyUpdates(t *testing.T) { "-server-port=" + strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace", k8sNamespaceFlag, - "-create-client-token", + "-client", "-allow-dns", - "-create-mesh-gateway-token", - "-create-sync-token", - "-create-inject-token", - "-create-snapshot-agent-token", + "-mesh-gateway", + "-sync-catalog", + "-connect-inject", + "-snapshot-agent", "-create-enterprise-license-token", - "-ingress-gateway-name=gw", - "-ingress-gateway-name=anothergw", - "-terminating-gateway-name=gw", - "-terminating-gateway-name=anothergw", - "-create-controller-token", + "-ingress-gateway-name=igw", + "-ingress-gateway-name=anotherigw", + "-terminating-gateway-name=tgw", + "-terminating-gateway-name=anothertgw", + "-controller", } // Our second run, we're going to update from partitions and namespaces disabled to // namespaces enabled with a single destination ns and partitions enabled. @@ -314,17 +328,17 @@ func TestRun_ACLPolicyUpdates(t *testing.T) { // Check that the expected policies were created. firstRunExpectedPolicies := []string{ "anonymous-token-policy", - "client-token", - "catalog-sync-token", - "mesh-gateway-token", - "client-snapshot-agent-token", + "client-policy", + "sync-catalog-policy", + "mesh-gateway-policy", + "snapshot-agent-policy", "enterprise-license-token", - "gw-ingress-gateway-token", - "anothergw-ingress-gateway-token", - "gw-terminating-gateway-token", - "anothergw-terminating-gateway-token", - "connect-inject-token", - "controller-token", + resourcePrefix + "-igw-policy", + resourcePrefix + "-anotherigw-policy", + resourcePrefix + "-tgw-policy", + resourcePrefix + "-anothertgw-policy", + "connect-inject-policy", + "controller-policy", } policies, _, err := consul.ACL().PolicyList(nil) require.NoError(err) @@ -365,18 +379,18 @@ func TestRun_ACLPolicyUpdates(t *testing.T) { // Check that the policies have all been updated. 
secondRunExpectedPolicies := []string{ "anonymous-token-policy", - "client-token", - "catalog-sync-token", - "connect-inject-token", - "mesh-gateway-token", - "client-snapshot-agent-token", + "client-policy", + "sync-catalog-policy", + "connect-inject-policy", + "mesh-gateway-policy", + "snapshot-agent-policy", "enterprise-license-token", "cross-namespace-policy", - "gw-ingress-gateway-token", - "anothergw-ingress-gateway-token", - "gw-terminating-gateway-token", - "anothergw-terminating-gateway-token", - "controller-token", + resourcePrefix + "-igw-policy", + resourcePrefix + "-anotherigw-policy", + resourcePrefix + "-tgw-policy", + resourcePrefix + "-anothertgw-policy", + "controller-policy", "partitions-token", } policies, _, err = consul.ACL().PolicyList(nil) @@ -401,11 +415,11 @@ func TestRun_ACLPolicyUpdates(t *testing.T) { require.True(ok, "Did not find policy %s", expected) switch expected { - case "connect-inject-token": + case "connect-inject-policy": // The connect inject token doesn't have namespace config, // but does change to operator:write from an empty string. require.Contains(actRules, "policy = \"write\"") - case "client-snapshot-agent-token", "enterprise-license-token": + case "snapshot-agent-policy", "enterprise-license-token": // The snapshot agent and enterprise license tokens shouldn't change. require.NotContains(actRules, "namespace") require.Contains(actRules, "acl = \"write\"") @@ -577,7 +591,7 @@ func TestRun_ConnectInject_Updates(t *testing.T) { for name, c := range cases { t.Run(name, func(tt *testing.T) { require := require.New(tt) - k8s, testAgent := completeEnterpriseSetup(tt) + k8s, testAgent := completeSetup(tt) defer testAgent.Stop() setUpK8sServiceAccount(tt, k8s, ns) @@ -589,7 +603,7 @@ func TestRun_ConnectInject_Updates(t *testing.T) { "-k8s-namespace=" + ns, "-enable-partitions", "-partition=default", - "-create-inject-token", + "-connect-inject", } // First run. 
NOTE: we don't assert anything here since we've @@ -658,27 +672,6 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { SecretNames []string LocalToken bool }{ - "client token": { - TokenFlags: []string{"-create-client-token"}, - PolicyNames: []string{"client-token"}, - PolicyDCs: []string{"dc1"}, - SecretNames: []string{resourcePrefix + "-client-acl-token"}, - LocalToken: true, - }, - "catalog-sync token": { - TokenFlags: []string{"-create-sync-token"}, - PolicyNames: []string{"catalog-sync-token"}, - PolicyDCs: nil, - SecretNames: []string{resourcePrefix + "-catalog-sync-acl-token"}, - LocalToken: false, - }, - "connect-inject-token": { - TokenFlags: []string{"-create-inject-token", "-enable-namespaces"}, - PolicyNames: []string{"connect-inject-token"}, - PolicyDCs: nil, - SecretNames: []string{resourcePrefix + "-connect-inject-acl-token"}, - LocalToken: false, - }, "enterprise-license token": { TokenFlags: []string{"-create-enterprise-license-token"}, PolicyNames: []string{"enterprise-license-token"}, @@ -686,46 +679,6 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { SecretNames: []string{resourcePrefix + "-enterprise-license-acl-token"}, LocalToken: true, }, - "client-snapshot-agent token": { - TokenFlags: []string{"-create-snapshot-agent-token"}, - PolicyNames: []string{"client-snapshot-agent-token"}, - PolicyDCs: []string{"dc1"}, - SecretNames: []string{resourcePrefix + "-client-snapshot-agent-acl-token"}, - LocalToken: true, - }, - "mesh-gateway token": { - TokenFlags: []string{"-create-mesh-gateway-token"}, - PolicyNames: []string{"mesh-gateway-token"}, - PolicyDCs: nil, - SecretNames: []string{resourcePrefix + "-mesh-gateway-acl-token"}, - LocalToken: false, - }, - "ingress gateway tokens": { - TokenFlags: []string{"-ingress-gateway-name=ingress", - "-ingress-gateway-name=gateway", - "-ingress-gateway-name=another-gateway"}, - PolicyNames: []string{"ingress-ingress-gateway-token", - "gateway-ingress-gateway-token", - "another-gateway-ingress-gateway-token"}, - PolicyDCs: []string{"dc1"}, - SecretNames: []string{resourcePrefix + "-ingress-ingress-gateway-acl-token", - resourcePrefix + "-gateway-ingress-gateway-acl-token", - resourcePrefix + "-another-gateway-ingress-gateway-acl-token"}, - LocalToken: true, - }, - "terminating gateway tokens": { - TokenFlags: []string{"-terminating-gateway-name=terminating", - "-terminating-gateway-name=gateway", - "-terminating-gateway-name=another-gateway"}, - PolicyNames: []string{"terminating-terminating-gateway-token", - "gateway-terminating-gateway-token", - "another-gateway-terminating-gateway-token"}, - PolicyDCs: []string{"dc1"}, - SecretNames: []string{resourcePrefix + "-terminating-terminating-gateway-acl-token", - resourcePrefix + "-gateway-terminating-gateway-acl-token", - resourcePrefix + "-another-gateway-terminating-gateway-acl-token"}, - LocalToken: true, - }, "acl-replication token": { TokenFlags: []string{"-create-acl-replication-token"}, PolicyNames: []string{"acl-replication-token"}, @@ -733,27 +686,6 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { SecretNames: []string{resourcePrefix + "-acl-replication-acl-token"}, LocalToken: false, }, - "inject token with namespaces (deprecated)": { - TokenFlags: []string{"-create-inject-auth-method", "-enable-namespaces", "-create-inject-namespace-token"}, - PolicyNames: []string{"connect-inject-token"}, - PolicyDCs: nil, - SecretNames: []string{resourcePrefix + "-connect-inject-acl-token"}, - LocalToken: false, - }, - "inject token and namespaces": { - 
TokenFlags: []string{"-create-inject-token", "-enable-namespaces"}, - PolicyNames: []string{"connect-inject-token"}, - PolicyDCs: nil, - SecretNames: []string{resourcePrefix + "-connect-inject-acl-token"}, - LocalToken: false, - }, - "controller token": { - TokenFlags: []string{"-create-controller-token"}, - PolicyNames: []string{"controller-token"}, - PolicyDCs: nil, - SecretNames: []string{resourcePrefix + "-controller-acl-token"}, - LocalToken: false, - }, "partitions token": { TokenFlags: []string{"-enable-partitions", "-partition=default"}, PolicyNames: []string{"partitions-token"}, @@ -764,7 +696,7 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { } for testName, c := range cases { t.Run(testName, func(t *testing.T) { - k8s, testSvr := completeEnterpriseSetup(t) + k8s, testSvr := completeSetup(t) setUpK8sServiceAccount(t, k8s, ns) defer testSvr.Stop() require := require.New(t) @@ -797,17 +729,16 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { }) require.NoError(err) + // Check that the expected policy was created. for i := range c.PolicyNames { policy := policyExists(t, c.PolicyNames[i], consul) require.Equal(c.PolicyDCs, policy.Datacenters) - // Test that the token was created as a Kubernetes Secret. tokenSecret, err := k8s.CoreV1().Secrets(ns).Get(context.Background(), c.SecretNames[i], metav1.GetOptions{}) require.NoError(err) require.NotNil(tokenSecret) token, ok := tokenSecret.Data["token"] require.True(ok) - // Test that the token has the expected policies in Consul. tokenData, _, err := consul.ACL().TokenReadSelf(&api.QueryOptions{Token: string(token)}) require.NoError(err) @@ -845,9 +776,9 @@ func TestRun_GatewayNamespaceParsing(t *testing.T) { TokenFlags: []string{"-ingress-gateway-name=ingress", "-ingress-gateway-name=gateway", "-ingress-gateway-name=another-gateway"}, - PolicyNames: []string{"ingress-ingress-gateway-token", - "gateway-ingress-gateway-token", - "another-gateway-ingress-gateway-token"}, + PolicyNames: []string{resourcePrefix + "-ingress-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, ExpectedPolicies: []string{` partition "default" { namespace "default" { @@ -894,9 +825,9 @@ partition "default" { TokenFlags: []string{"-ingress-gateway-name=ingress.", "-ingress-gateway-name=gateway.namespace1", "-ingress-gateway-name=another-gateway.namespace2"}, - PolicyNames: []string{"ingress-ingress-gateway-token", - "gateway-ingress-gateway-token", - "another-gateway-ingress-gateway-token"}, + PolicyNames: []string{resourcePrefix + "-ingress-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, ExpectedPolicies: []string{` partition "default" { namespace "default" { @@ -943,9 +874,9 @@ partition "default" { TokenFlags: []string{"-terminating-gateway-name=terminating", "-terminating-gateway-name=gateway", "-terminating-gateway-name=another-gateway"}, - PolicyNames: []string{"terminating-terminating-gateway-token", - "gateway-terminating-gateway-token", - "another-gateway-terminating-gateway-token"}, + PolicyNames: []string{resourcePrefix + "-terminating-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, ExpectedPolicies: []string{` partition "default" { namespace "default" { @@ -983,9 +914,9 @@ partition "default" { TokenFlags: []string{"-terminating-gateway-name=terminating.", "-terminating-gateway-name=gateway.namespace1", "-terminating-gateway-name=another-gateway.namespace2"}, - PolicyNames: 
[]string{"terminating-terminating-gateway-token", - "gateway-terminating-gateway-token", - "another-gateway-terminating-gateway-token"}, + PolicyNames: []string{resourcePrefix + "-terminating-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, ExpectedPolicies: []string{` partition "default" { namespace "default" { @@ -1021,8 +952,9 @@ partition "default" { } for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { - k8s, testSvr := completeEnterpriseSetup(t) + k8s, testSvr := completeSetup(t) defer testSvr.Stop() + setUpK8sServiceAccount(t, k8s, ns) require := require.New(t) // Run the command. @@ -1076,16 +1008,259 @@ partition "default" { } } -// Set up test consul agent and kubernetes cluster. -func completeEnterpriseSetup(t *testing.T) (*fake.Clientset, *testutil.TestServer) { - k8s := fake.NewSimpleClientset() +// Test that server-acl-init used the local auth method to create the desired token in the primary datacenter. +// The test works by running the login command and then ensuring that the token +// returned has the correct role for the component. +func TestRun_NamespaceEnabled_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { + t.Parallel() - svr, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - c.ACL.Enabled = true + cases := []struct { + ComponentName string + TokenFlags []string + Roles []string + Namespace string + GlobalToken bool + }{ + { + ComponentName: "connect-injector", + TokenFlags: []string{"-connect-inject"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role"}, + Namespace: ns, + GlobalToken: false, + }, + { + ComponentName: "sync-catalog", + TokenFlags: []string{"-sync-catalog"}, + Roles: []string{resourcePrefix + "-sync-catalog-acl-role"}, + Namespace: ns, + GlobalToken: false, + }, + } + for _, c := range cases { + t.Run(c.ComponentName, func(t *testing.T) { + authMethodName := fmt.Sprintf("%s-%s", resourcePrefix, componentAuthMethod) + serviceAccountName := fmt.Sprintf("%s-%s", resourcePrefix, c.ComponentName) + + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() + _, jwtToken := setUpK8sServiceAccount(t, k8s, c.Namespace) + + k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("content-type", "application/json") + if r != nil && r.URL.Path == "/apis/authentication.k8s.io/v1/tokenreviews" && r.Method == "POST" { + w.Write([]byte(test.TokenReviewsResponse(serviceAccountName, c.Namespace))) + } + if r != nil && r.URL.Path == fmt.Sprintf("/api/v1/namespaces/%s/serviceaccounts/%s", c.Namespace, serviceAccountName) && + r.Method == "GET" { + w.Write([]byte(test.ServiceAccountGetResponse(serviceAccountName, c.Namespace))) + } + })) + t.Cleanup(k8sMockServer.Close) + + // Run the command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + cmdArgs := append([]string{ + "-timeout=500ms", + "-resource-prefix=" + resourcePrefix, + "-enable-namespaces", + "-k8s-namespace=" + c.Namespace, + "-enable-namespaces", + "-consul-inject-destination-namespace", c.Namespace, + "-auth-method-host=" + k8sMockServer.URL, + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + }, c.TokenFlags...) 
+ cmd.init() + responseCode := cmd.Run(cmdArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + client, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + }) + require.NoError(t, err) + + tok, _, err := client.ACL().Login(&api.ACLLoginParams{ + AuthMethod: authMethodName, + BearerToken: jwtToken, + Meta: map[string]string{}, + }, &api.WriteOptions{}) + require.NoError(t, err) + + require.Equal(t, len(tok.Roles), len(c.Roles)) + for _, role := range tok.Roles { + require.Contains(t, c.Roles, role.Name) + } + require.Equal(t, !c.GlobalToken, tok.Local) + }) + } +} + +// Test that server-acl-init used the global auth method to create the desired token in the secondary datacenter. +// The test works by running the login command and then ensuring that the token +// returned has the correct role for the component. +func TestRun_NamespaceEnabled_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { + t.Parallel() + + cases := []struct { + ComponentName string + TokenFlags []string + Roles []string + Namespace string + GlobalToken bool + }{ + { + ComponentName: "connect-injector", + TokenFlags: []string{"-connect-inject"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role-dc2"}, + Namespace: ns, + GlobalToken: true, + }, + { + ComponentName: "sync-catalog", + TokenFlags: []string{"-sync-catalog"}, + Roles: []string{resourcePrefix + "-sync-catalog-acl-role-dc2"}, + Namespace: ns, + GlobalToken: true, + }, + } + for _, c := range cases { + t.Run(c.ComponentName, func(t *testing.T) { + bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + tokenFile := common.WriteTempFile(t, bootToken) + authMethodName := fmt.Sprintf("%s-%s-%s", resourcePrefix, componentAuthMethod, "dc2") + serviceAccountName := fmt.Sprintf("%s-%s", resourcePrefix, c.ComponentName) + + k8s, _, consulHTTPAddr, cleanup := mockReplicatedSetup(t, bootToken) + defer cleanup() + _, jwtToken := setUpK8sServiceAccount(t, k8s, c.Namespace) + + k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("content-type", "application/json") + if r != nil && r.URL.Path == "/apis/authentication.k8s.io/v1/tokenreviews" && r.Method == "POST" { + w.Write([]byte(test.TokenReviewsResponse(serviceAccountName, c.Namespace))) + } + if r != nil && r.URL.Path == fmt.Sprintf("/api/v1/namespaces/%s/serviceaccounts/%s", c.Namespace, serviceAccountName) && + r.Method == "GET" { + w.Write([]byte(test.ServiceAccountGetResponse(serviceAccountName, c.Namespace))) + } + })) + t.Cleanup(k8sMockServer.Close) + + // Run the command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + cmdArgs := append([]string{ + "-federation", + "-timeout=1m", + "-resource-prefix=" + resourcePrefix, + "-enable-namespaces", + "-k8s-namespace=" + c.Namespace, + "-enable-namespaces", + "-consul-inject-destination-namespace", c.Namespace, + "-acl-replication-token-file", tokenFile, + "-auth-method-host=" + k8sMockServer.URL, + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], + }, c.TokenFlags...) 
+ cmd.init() + responseCode := cmd.Run(cmdArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + client, err := api.NewClient(&api.Config{ + Address: consulHTTPAddr, + Datacenter: "dc1", + }) + require.NoError(t, err) + + retry.Run(t, func(r *retry.R) { + tok, _, err := client.ACL().Login(&api.ACLLoginParams{ + AuthMethod: authMethodName, + BearerToken: jwtToken, + Meta: map[string]string{}, + }, &api.WriteOptions{}) + require.NoError(r, err) + + require.Equal(r, len(tok.Roles), len(c.Roles)) + for _, role := range tok.Roles { + require.Contains(r, c.Roles, role.Name) + } + require.Equal(r, !c.GlobalToken, tok.Local) + }) + }) + } +} + +// Test that the partition token can be created when it's provided with a file. +func TestRun_PartitionTokenDefaultPartition_WithProvidedSecretID(t *testing.T) { + t.Parallel() + + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() + setUpK8sServiceAccount(t, k8s, ns) + + partitionToken := "123e4567-e89b-12d3-a456-426614174000" + partitionTokenFile, err := ioutil.TempFile("", "partitiontoken") + require.NoError(t, err) + defer os.Remove(partitionTokenFile.Name()) + + partitionTokenFile.WriteString(partitionToken) + // Run the command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + cmd.init() + cmdArgs := []string{ + "-timeout=1m", + "-k8s-namespace=" + ns, + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-resource-prefix=" + resourcePrefix, + "-enable-partitions", + "-partition=default", + "-partition-token-file", partitionTokenFile.Name(), + } + + responseCode := cmd.Run(cmdArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + // Check that this token is created. + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + Token: partitionToken, }) require.NoError(t, err) + token, _, err := consul.ACL().TokenReadSelf(nil) + require.NoError(t, err) - return k8s, svr + for _, policyLink := range token.Policies { + policy := policyExists(t, policyLink.Name, consul) + require.Equal(t, policy.Datacenters, []string{"dc1"}) + + // Test that the token was not created as a Kubernetes Secret. + _, err := k8s.CoreV1().Secrets(ns).Get(context.Background(), resourcePrefix+"-partitions-acl-token", metav1.GetOptions{}) + require.True(t, k8serrors.IsNotFound(err)) + } + + // Test that if the same command is run again, it doesn't error. 
+ t.Run(t.Name()+"-retried", func(t *testing.T) { + ui = cli.NewMockUi() + cmd = Command{ + UI: ui, + clientset: k8s, + } + cmd.init() + responseCode = cmd.Run(cmdArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + }) } // partitionedSetup is a helper function which creates a server and a consul agent that runs as diff --git a/control-plane/subcommand/server-acl-init/command_test.go b/control-plane/subcommand/server-acl-init/command_test.go index bbccfcc20d..326e3cc57c 100644 --- a/control-plane/subcommand/server-acl-init/command_test.go +++ b/control-plane/subcommand/server-acl-init/command_test.go @@ -16,6 +16,16 @@ import ( "testing" "time" + "github.com/hashicorp/consul-k8s/control-plane/helper/cert" + "github.com/hashicorp/consul-k8s/control-plane/helper/go-discover/mocks" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/go-discover" + "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -23,23 +33,15 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" - - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/freeport" - "github.com/hashicorp/consul/sdk/testutil" - "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/hashicorp/go-discover" - "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/consul-k8s/control-plane/helper/cert" - "github.com/hashicorp/consul-k8s/control-plane/helper/go-discover/mocks" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" - "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" ) var ns = "default" var resourcePrefix = "release-name-consul" +const ( + componentAuthMethod = "k8s-component-auth-method" +) + func TestRun_FlagValidation(t *testing.T) { t.Parallel() @@ -57,11 +59,11 @@ func TestRun_FlagValidation(t *testing.T) { }, { Flags: []string{"-acl-replication-token-file=/notexist", "-server-address=localhost", "-resource-prefix=prefix"}, - ExpErr: "Unable to read ACL replication token from file \"/notexist\": open /notexist: no such file or directory", + ExpErr: "unable to read token from file \"/notexist\": open /notexist: no such file or directory", }, { Flags: []string{"-bootstrap-token-file=/notexist", "-server-address=localhost", "-resource-prefix=prefix"}, - ExpErr: "Unable to read bootstrap token from file \"/notexist\": open /notexist: no such file or directory", + ExpErr: "unable to read token from file \"/notexist\": open /notexist: no such file or directory", }, { Flags: []string{ @@ -104,7 +106,7 @@ func TestRun_Defaults(t *testing.T) { k8s, testSvr := completeSetup(t) defer testSvr.Stop() - require := require.New(t) + setUpK8sServiceAccount(t, k8s, ns) // Run the command. ui := cli.NewMockUi() @@ -120,7 +122,7 @@ func TestRun_Defaults(t *testing.T) { "-resource-prefix=" + resourcePrefix, } responseCode := cmd.Run(args) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) // Test that the bootstrap kube secret is created. 
bootToken := getBootToken(t, k8s, resourcePrefix, ns) @@ -130,15 +132,15 @@ func TestRun_Defaults(t *testing.T) { Address: testSvr.HTTPAddr, Token: bootToken, }) - require.NoError(err) + require.NoError(t, err) tokenData, _, err := consul.ACL().TokenReadSelf(nil) - require.NoError(err) - require.Equal("global-management", tokenData.Policies[0].Name) + require.NoError(t, err) + require.Equal(t, "global-management", tokenData.Policies[0].Name) // Check that the agent policy was created. agentPolicy := policyExists(t, "agent-token", consul) // Should be a global policy. - require.Len(agentPolicy.Datacenters, 0) + require.Len(t, agentPolicy.Datacenters, 0) // We should also test that the server's token was updated, however I // couldn't find a way to test that with the test agent. Instead we test @@ -160,22 +162,6 @@ func TestRun_TokensPrimaryDC(t *testing.T) { SecretNames []string LocalToken bool }{ - { - TestName: "Client token", - TokenFlags: []string{"-create-client-token"}, - PolicyNames: []string{"client-token"}, - PolicyDCs: []string{"dc1"}, - SecretNames: []string{resourcePrefix + "-client-acl-token"}, - LocalToken: true, - }, - { - TestName: "Sync token", - TokenFlags: []string{"-create-sync-token"}, - PolicyNames: []string{"catalog-sync-token"}, - PolicyDCs: []string{"dc1"}, - SecretNames: []string{resourcePrefix + "-catalog-sync-acl-token"}, - LocalToken: true, - }, { TestName: "Enterprise license token", TokenFlags: []string{"-create-enterprise-license-token"}, @@ -184,58 +170,6 @@ func TestRun_TokensPrimaryDC(t *testing.T) { SecretNames: []string{resourcePrefix + "-enterprise-license-acl-token"}, LocalToken: true, }, - { - TestName: "Snapshot agent token", - TokenFlags: []string{"-create-snapshot-agent-token"}, - PolicyNames: []string{"client-snapshot-agent-token"}, - PolicyDCs: []string{"dc1"}, - SecretNames: []string{resourcePrefix + "-client-snapshot-agent-acl-token"}, - LocalToken: true, - }, - { - TestName: "API gateway token", - TokenFlags: []string{"-create-api-gateway-token"}, - PolicyNames: []string{"api-gateway-controller-token"}, - PolicyDCs: []string{"dc1"}, - SecretNames: []string{resourcePrefix + "-api-gateway-controller-acl-token"}, - LocalToken: true, - }, - { - TestName: "Mesh gateway token", - TokenFlags: []string{"-create-mesh-gateway-token"}, - PolicyNames: []string{"mesh-gateway-token"}, - PolicyDCs: nil, - SecretNames: []string{resourcePrefix + "-mesh-gateway-acl-token"}, - LocalToken: false, - }, - { - TestName: "Ingress gateway tokens", - TokenFlags: []string{"-ingress-gateway-name=ingress", - "-ingress-gateway-name=gateway", - "-ingress-gateway-name=another-gateway"}, - PolicyNames: []string{"ingress-ingress-gateway-token", - "gateway-ingress-gateway-token", - "another-gateway-ingress-gateway-token"}, - PolicyDCs: []string{"dc1"}, - SecretNames: []string{resourcePrefix + "-ingress-ingress-gateway-acl-token", - resourcePrefix + "-gateway-ingress-gateway-acl-token", - resourcePrefix + "-another-gateway-ingress-gateway-acl-token"}, - LocalToken: true, - }, - { - TestName: "Terminating gateway tokens", - TokenFlags: []string{"-terminating-gateway-name=terminating", - "-terminating-gateway-name=gateway", - "-terminating-gateway-name=another-gateway"}, - PolicyNames: []string{"terminating-terminating-gateway-token", - "gateway-terminating-gateway-token", - "another-gateway-terminating-gateway-token"}, - PolicyDCs: []string{"dc1"}, - SecretNames: []string{resourcePrefix + "-terminating-terminating-gateway-acl-token", - resourcePrefix + 
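The test hunks in this file also migrate from binding `require.New(t)` to calling the package-level require helpers with an explicit testing handle. A small sketch of the two styles; the explicit form is what allows the same assertions to run unchanged inside `retry.Run` blocks, since `*retry.R` satisfies the interface the require helpers accept. The test name and values are illustrative only.

package example

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/stretchr/testify/require"
)

func TestAssertionStyles(t *testing.T) {
	// Older style: bind the assertions to t once per test.
	req := require.New(t)
	req.Equal(1, 1)

	// Style used throughout this change: pass the testing handle explicitly.
	require.Equal(t, 1, 1)

	// The explicit form also works with *retry.R, which is how the flaky
	// assertions in this patch are wrapped.
	retry.Run(t, func(r *retry.R) {
		require.Equal(r, 1, 1)
	})
}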
"-gateway-terminating-gateway-acl-token", - resourcePrefix + "-another-gateway-terminating-gateway-acl-token"}, - LocalToken: true, - }, { TestName: "ACL replication token", TokenFlags: []string{"-create-acl-replication-token"}, @@ -244,29 +178,12 @@ func TestRun_TokensPrimaryDC(t *testing.T) { SecretNames: []string{resourcePrefix + "-acl-replication-acl-token"}, LocalToken: false, }, - { - TestName: "Controller token", - TokenFlags: []string{"-create-controller-token"}, - PolicyNames: []string{"controller-token"}, - PolicyDCs: nil, - SecretNames: []string{resourcePrefix + "-controller-acl-token"}, - LocalToken: false, - }, - { - TestName: "Endpoints Controller ACL token", - TokenFlags: []string{"-create-inject-token"}, - PolicyNames: []string{"connect-inject-token"}, - PolicyDCs: []string{"dc1"}, - SecretNames: []string{resourcePrefix + "-connect-inject-acl-token"}, - LocalToken: true, - }, } for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { k8s, testSvr := completeSetup(t) setUpK8sServiceAccount(t, k8s, ns) defer testSvr.Stop() - require := require.New(t) // Run the command. ui := cli.NewMockUi() @@ -284,7 +201,7 @@ func TestRun_TokensPrimaryDC(t *testing.T) { }, c.TokenFlags...) responseCode := cmd.Run(cmdArgs) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) // Check that the expected policy was created. bootToken := getBootToken(t, k8s, resourcePrefix, ns) @@ -292,24 +209,24 @@ func TestRun_TokensPrimaryDC(t *testing.T) { Address: testSvr.HTTPAddr, Token: bootToken, }) - require.NoError(err) + require.NoError(t, err) for i := range c.PolicyNames { policy := policyExists(t, c.PolicyNames[i], consul) - require.Equal(c.PolicyDCs, policy.Datacenters) + require.Equal(t, c.PolicyDCs, policy.Datacenters) // Test that the token was created as a Kubernetes Secret. tokenSecret, err := k8s.CoreV1().Secrets(ns).Get(context.Background(), c.SecretNames[i], metav1.GetOptions{}) - require.NoError(err) - require.NotNil(tokenSecret) + require.NoError(t, err) + require.NotNil(t, tokenSecret) token, ok := tokenSecret.Data["token"] - require.True(ok) + require.True(t, ok) // Test that the token has the expected policies in Consul. tokenData, _, err := consul.ACL().TokenReadSelf(&api.QueryOptions{Token: string(token)}) - require.NoError(err) - require.Equal(c.PolicyNames[i], tokenData.Policies[0].Name) - require.Equal(c.LocalToken, tokenData.Local) + require.NoError(t, err) + require.Equal(t, c.PolicyNames[i], tokenData.Policies[0].Name) + require.Equal(t, c.LocalToken, tokenData.Local) } // Test that if the same command is run again, it doesn't error. 
@@ -321,7 +238,7 @@ func TestRun_TokensPrimaryDC(t *testing.T) { } cmd.init() responseCode := cmd.Run(cmdArgs) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) }) }) } @@ -332,11 +249,11 @@ func TestRun_ReplicationTokenPrimaryDC_WithProvidedSecretID(t *testing.T) { k8s, testSvr := completeSetup(t) defer testSvr.Stop() - require := require.New(t) + setUpK8sServiceAccount(t, k8s, ns) replicationToken := "123e4567-e89b-12d3-a456-426614174000" replicationTokenFile, err := ioutil.TempFile("", "replicationtoken") - require.NoError(err) + require.NoError(t, err) defer os.Remove(replicationTokenFile.Name()) replicationTokenFile.WriteString(replicationToken) @@ -358,24 +275,24 @@ func TestRun_ReplicationTokenPrimaryDC_WithProvidedSecretID(t *testing.T) { } responseCode := cmd.Run(cmdArgs) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) // Check that this token is created. consul, err := api.NewClient(&api.Config{ Address: testSvr.HTTPAddr, Token: replicationToken, }) - require.NoError(err) + require.NoError(t, err) token, _, err := consul.ACL().TokenReadSelf(nil) - require.NoError(err) + require.NoError(t, err) for _, policyLink := range token.Policies { policy := policyExists(t, policyLink.Name, consul) - require.Nil(policy.Datacenters) + require.Nil(t, policy.Datacenters) // Test that the token was not created as a Kubernetes Secret. _, err := k8s.CoreV1().Secrets(ns).Get(context.Background(), resourcePrefix+"-acl-replication-acl-token", metav1.GetOptions{}) - require.True(k8serrors.IsNotFound(err)) + require.True(t, k8serrors.IsNotFound(err)) } // Test that if the same command is run again, it doesn't error. @@ -387,7 +304,7 @@ func TestRun_ReplicationTokenPrimaryDC_WithProvidedSecretID(t *testing.T) { } cmd.init() responseCode = cmd.Run(cmdArgs) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) }) } @@ -403,22 +320,6 @@ func TestRun_TokensReplicatedDC(t *testing.T) { SecretNames []string LocalToken bool }{ - { - TestName: "Client token", - TokenFlags: []string{"-create-client-token"}, - PolicyNames: []string{"client-token-dc2"}, - PolicyDCs: []string{"dc2"}, - SecretNames: []string{resourcePrefix + "-client-acl-token"}, - LocalToken: true, - }, - { - TestName: "Sync token", - TokenFlags: []string{"-create-sync-token"}, - PolicyNames: []string{"catalog-sync-token-dc2"}, - PolicyDCs: []string{"dc2"}, - SecretNames: []string{resourcePrefix + "-catalog-sync-acl-token"}, - LocalToken: true, - }, { TestName: "Enterprise license token", TokenFlags: []string{"-create-enterprise-license-token"}, @@ -427,74 +328,6 @@ func TestRun_TokensReplicatedDC(t *testing.T) { SecretNames: []string{resourcePrefix + "-enterprise-license-acl-token"}, LocalToken: true, }, - { - TestName: "Snapshot agent token", - TokenFlags: []string{"-create-snapshot-agent-token"}, - PolicyNames: []string{"client-snapshot-agent-token-dc2"}, - PolicyDCs: []string{"dc2"}, - SecretNames: []string{resourcePrefix + "-client-snapshot-agent-acl-token"}, - LocalToken: true, - }, - { - TestName: "API Gateway token", - TokenFlags: []string{"-create-api-gateway-token"}, - PolicyNames: []string{"api-gateway-controller-token-dc2"}, - PolicyDCs: []string{"dc2"}, - SecretNames: []string{resourcePrefix + "-api-gateway-controller-acl-token"}, - LocalToken: true, - }, - { - TestName: "Mesh gateway token", - TokenFlags: 
[]string{"-create-mesh-gateway-token"}, - PolicyNames: []string{"mesh-gateway-token-dc2"}, - PolicyDCs: nil, - SecretNames: []string{resourcePrefix + "-mesh-gateway-acl-token"}, - LocalToken: false, - }, - { - TestName: "Ingress gateway tokens", - TokenFlags: []string{"-ingress-gateway-name=ingress", - "-ingress-gateway-name=gateway", - "-ingress-gateway-name=another-gateway"}, - PolicyNames: []string{"ingress-ingress-gateway-token-dc2", - "gateway-ingress-gateway-token-dc2", - "another-gateway-ingress-gateway-token-dc2"}, - PolicyDCs: []string{"dc2"}, - SecretNames: []string{resourcePrefix + "-ingress-ingress-gateway-acl-token", - resourcePrefix + "-gateway-ingress-gateway-acl-token", - resourcePrefix + "-another-gateway-ingress-gateway-acl-token"}, - LocalToken: true, - }, - { - TestName: "Terminating gateway tokens", - TokenFlags: []string{"-terminating-gateway-name=terminating", - "-terminating-gateway-name=gateway", - "-terminating-gateway-name=another-gateway"}, - PolicyNames: []string{"terminating-terminating-gateway-token-dc2", - "gateway-terminating-gateway-token-dc2", - "another-gateway-terminating-gateway-token-dc2"}, - PolicyDCs: []string{"dc2"}, - SecretNames: []string{resourcePrefix + "-terminating-terminating-gateway-acl-token", - resourcePrefix + "-gateway-terminating-gateway-acl-token", - resourcePrefix + "-another-gateway-terminating-gateway-acl-token"}, - LocalToken: true, - }, - { - TestName: "Endpoints controller ACL token", - TokenFlags: []string{"-create-inject-token"}, - PolicyNames: []string{"connect-inject-token-dc2"}, - PolicyDCs: []string{"dc2"}, - SecretNames: []string{resourcePrefix + "-connect-inject-acl-token"}, - LocalToken: true, - }, - { - TestName: "Controller token", - TokenFlags: []string{"-create-controller-token"}, - PolicyNames: []string{"controller-token-dc2"}, - PolicyDCs: nil, - SecretNames: []string{resourcePrefix + "-controller-acl-token"}, - LocalToken: false, - }, } for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { @@ -559,84 +392,18 @@ func TestRun_TokensWithProvidedBootstrapToken(t *testing.T) { PolicyNames []string SecretNames []string }{ - { - TestName: "Client token", - TokenFlags: []string{"-create-client-token"}, - PolicyNames: []string{"client-token"}, - SecretNames: []string{resourcePrefix + "-client-acl-token"}, - }, - { - TestName: "Endpoints controller ACL token", - TokenFlags: []string{"-create-inject-token"}, - PolicyNames: []string{"connect-inject-token"}, - SecretNames: []string{resourcePrefix + "-connect-inject-acl-token"}, - }, - { - TestName: "Sync token", - TokenFlags: []string{"-create-sync-token"}, - PolicyNames: []string{"catalog-sync-token"}, - SecretNames: []string{resourcePrefix + "-catalog-sync-acl-token"}, - }, { TestName: "Enterprise license token", TokenFlags: []string{"-create-enterprise-license-token"}, PolicyNames: []string{"enterprise-license-token"}, SecretNames: []string{resourcePrefix + "-enterprise-license-acl-token"}, }, - { - TestName: "Snapshot agent token", - TokenFlags: []string{"-create-snapshot-agent-token"}, - PolicyNames: []string{"client-snapshot-agent-token"}, - SecretNames: []string{resourcePrefix + "-client-snapshot-agent-acl-token"}, - }, - { - TestName: "API Gateway token", - TokenFlags: []string{"-create-api-gateway-token"}, - PolicyNames: []string{"api-gateway-controller-token"}, - SecretNames: []string{resourcePrefix + "-api-gateway-controller-acl-token"}, - }, - { - TestName: "Mesh gateway token", - TokenFlags: []string{"-create-mesh-gateway-token"}, - PolicyNames: 
[]string{"mesh-gateway-token"}, - SecretNames: []string{resourcePrefix + "-mesh-gateway-acl-token"}, - }, - { - TestName: "Ingress gateway tokens", - TokenFlags: []string{"-ingress-gateway-name=ingress", - "-ingress-gateway-name=gateway", - "-ingress-gateway-name=another-gateway"}, - PolicyNames: []string{"ingress-ingress-gateway-token", - "gateway-ingress-gateway-token", - "another-gateway-ingress-gateway-token"}, - SecretNames: []string{resourcePrefix + "-ingress-ingress-gateway-acl-token", - resourcePrefix + "-gateway-ingress-gateway-acl-token", - resourcePrefix + "-another-gateway-ingress-gateway-acl-token"}, - }, - { - TestName: "Terminating gateway tokens", - TokenFlags: []string{"-terminating-gateway-name=terminating", - "-terminating-gateway-name=gateway", - "-terminating-gateway-name=another-gateway"}, - PolicyNames: []string{"terminating-terminating-gateway-token", - "gateway-terminating-gateway-token", - "another-gateway-terminating-gateway-token"}, - SecretNames: []string{resourcePrefix + "-terminating-terminating-gateway-acl-token", - resourcePrefix + "-gateway-terminating-gateway-acl-token", - resourcePrefix + "-another-gateway-terminating-gateway-acl-token"}, - }, { TestName: "ACL replication token", TokenFlags: []string{"-create-acl-replication-token"}, PolicyNames: []string{"acl-replication-token"}, SecretNames: []string{resourcePrefix + "-acl-replication-acl-token"}, }, - { - TestName: "Controller token", - TokenFlags: []string{"-create-controller-token"}, - PolicyNames: []string{"controller-token"}, - SecretNames: []string{resourcePrefix + "-controller-acl-token"}, - }, } for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { @@ -713,33 +480,13 @@ func TestRun_AnonymousTokenPolicy(t *testing.T) { SecondaryDC: true, ExpAnonymousPolicy: false, }, - "auth method, primary dc, no replication (deprecated)": { - Flags: []string{"-create-inject-auth-method"}, - SecondaryDC: false, - ExpAnonymousPolicy: false, - }, - "auth method, primary dc, with federation": { - Flags: []string{"-create-inject-auth-method", "-federation"}, - SecondaryDC: false, - ExpAnonymousPolicy: true, - }, - "auth method, secondary dc, with federation": { - Flags: []string{"-create-inject-auth-method", "-federation"}, - SecondaryDC: true, - ExpAnonymousPolicy: false, - }, - "auth method, secondary dc (deprecated)": { - Flags: []string{"-create-inject-auth-method"}, - SecondaryDC: true, - ExpAnonymousPolicy: false, - }, "auth method, primary dc, no replication": { - Flags: []string{"-create-inject-token"}, + Flags: []string{"-connect-inject"}, SecondaryDC: false, ExpAnonymousPolicy: false, }, "auth method, secondary dc": { - Flags: []string{"-create-inject-token"}, + Flags: []string{"-connect-inject"}, SecondaryDC: true, ExpAnonymousPolicy: false, }, @@ -840,25 +587,14 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { flags []string expectedHost string }{ - "-create-inject-token flag": { - flags: []string{"-create-inject-token"}, - expectedHost: "https://kubernetes.default.svc", - }, - "-create-inject-auth-method flag": { - flags: []string{"-create-inject-auth-method"}, + "-connect-inject flag": { + flags: []string{"-connect-inject"}, expectedHost: "https://kubernetes.default.svc", }, - "-inject-auth-method-host flag (deprecated)": { - flags: []string{ - "-create-inject-auth-method", - "-inject-auth-method-host=https://my-kube.com", - }, - expectedHost: "https://my-kube.com", - }, - "-inject-auth-method-host flag": { + "-auth-method-host flag": { flags: []string{ - "-create-inject-token", - 
"-inject-auth-method-host=https://my-kube.com", + "-connect-inject", + "-auth-method-host=https://my-kube.com", }, expectedHost: "https://my-kube.com", }, @@ -869,7 +605,6 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { k8s, testSvr := completeSetup(t) defer testSvr.Stop() caCert, jwtToken := setUpK8sServiceAccount(t, k8s, ns) - require := require.New(t) // Run the command. ui := cli.NewMockUi() @@ -889,32 +624,32 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { } cmdArgs = append(cmdArgs, c.flags...) responseCode := cmd.Run(cmdArgs) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) // Check that the auth method was created. bootToken := getBootToken(t, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ Address: testSvr.HTTPAddr, }) - require.NoError(err) + require.NoError(t, err) authMethodName := resourcePrefix + "-k8s-auth-method" authMethod, _, err := consul.ACL().AuthMethodRead(authMethodName, &api.QueryOptions{Token: bootToken}) - require.NoError(err) - require.Contains(authMethod.Config, "Host") - require.Equal(authMethod.Config["Host"], c.expectedHost) - require.Contains(authMethod.Config, "CACert") - require.Equal(authMethod.Config["CACert"], caCert) - require.Contains(authMethod.Config, "ServiceAccountJWT") - require.Equal(authMethod.Config["ServiceAccountJWT"], jwtToken) + require.NoError(t, err) + require.Contains(t, authMethod.Config, "Host") + require.Equal(t, authMethod.Config["Host"], c.expectedHost) + require.Contains(t, authMethod.Config, "CACert") + require.Equal(t, authMethod.Config["CACert"], caCert) + require.Contains(t, authMethod.Config, "ServiceAccountJWT") + require.Equal(t, authMethod.Config["ServiceAccountJWT"], jwtToken) // Check that the binding rule was created. rules, _, err := consul.ACL().BindingRuleList(authMethodName, &api.QueryOptions{Token: bootToken}) - require.NoError(err) - require.Len(rules, 1) - require.Equal("service", string(rules[0].BindType)) - require.Equal("${serviceaccount.name}", rules[0].BindName) - require.Equal(bindingRuleSelector, rules[0].Selector) + require.NoError(t, err) + require.Len(t, rules, 1) + require.Equal(t, "service", string(rules[0].BindType)) + require.Equal(t, "${serviceaccount.name}", rules[0].BindName) + require.Equal(t, bindingRuleSelector, rules[0].Selector) // Test that if the same command is re-run it doesn't error. t.Run("retried", func(t *testing.T) { @@ -925,7 +660,7 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { } cmd.init() responseCode := cmd.Run(cmdArgs) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) }) }) } @@ -936,196 +671,179 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { func TestRun_ConnectInjectAuthMethodUpdates(t *testing.T) { t.Parallel() - // Test with deprecated -create-inject-auth-method flag. 
- cases := []string{"-create-inject-auth-method", "-create-inject-token"} - for _, flag := range cases { - t.Run(flag, func(t *testing.T) { - - k8s, testSvr := completeSetup(t) - defer testSvr.Stop() - caCert, jwtToken := setUpK8sServiceAccount(t, k8s, ns) - require := require.New(t) - - ui := cli.NewMockUi() - cmd := Command{ - UI: ui, - clientset: k8s, - } - - bindingRuleSelector := "serviceaccount.name!=default" + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() + caCert, jwtToken := setUpK8sServiceAccount(t, k8s, ns) - // First, create an auth method using the defaults - responseCode := cmd.Run([]string{ - "-timeout=1m", - "-resource-prefix=" + resourcePrefix, - "-k8s-namespace=" + ns, - "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], - "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], - flag, - "-acl-binding-rule-selector=" + bindingRuleSelector, - }) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } - // Check that the auth method was created. - bootToken := getBootToken(t, k8s, resourcePrefix, ns) - consul, err := api.NewClient(&api.Config{ - Address: testSvr.HTTPAddr, - }) - require.NoError(err) - authMethodName := resourcePrefix + "-k8s-auth-method" - authMethod, _, err := consul.ACL().AuthMethodRead(authMethodName, - &api.QueryOptions{Token: bootToken}) - require.NoError(err) - require.NotNil(authMethod) - require.Contains(authMethod.Config, "Host") - require.Equal(authMethod.Config["Host"], defaultKubernetesHost) - require.Contains(authMethod.Config, "CACert") - require.Equal(authMethod.Config["CACert"], caCert) - require.Contains(authMethod.Config, "ServiceAccountJWT") - require.Equal(authMethod.Config["ServiceAccountJWT"], jwtToken) - - // Generate a new CA certificate - _, _, caCertPem, _, err := cert.GenerateCA("kubernetes") - require.NoError(err) - - // Overwrite the default kubernetes api, service account token and CA cert - kubernetesHost := "https://kubernetes.example.com" - // This token is the base64 encoded example token from jwt.io - serviceAccountToken = "ZXlKaGJHY2lPaUpJVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SnpkV0lpT2lJeE1qTTBOVFkzT0Rrd0lpd2libUZ0WlNJNklrcHZhRzRnUkc5bElpd2lhV0YwSWpveE5URTJNak01TURJeWZRLlNmbEt4d1JKU01lS0tGMlFUNGZ3cE1lSmYzNlBPazZ5SlZfYWRRc3N3NWM=" - serviceAccountCACert = base64.StdEncoding.EncodeToString([]byte(caCertPem)) - - // Create a new service account - updatedCACert, updatedJWTToken := setUpK8sServiceAccount(t, k8s, ns) - - // Run command again - responseCode = cmd.Run([]string{ - "-timeout=1m", - "-resource-prefix=" + resourcePrefix, - "-k8s-namespace=" + ns, - "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], - "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], - "-acl-binding-rule-selector=" + bindingRuleSelector, - flag, - "-inject-auth-method-host=" + kubernetesHost, - }) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + bindingRuleSelector := "serviceaccount.name!=default" - // Check that the auth method has been updated - authMethod, _, err = consul.ACL().AuthMethodRead(authMethodName, - &api.QueryOptions{Token: bootToken}) - require.NoError(err) - require.NotNil(authMethod) - require.Contains(authMethod.Config, "Host") - require.Equal(authMethod.Config["Host"], kubernetesHost) - require.Contains(authMethod.Config, "CACert") - require.Equal(authMethod.Config["CACert"], updatedCACert) - require.Contains(authMethod.Config, "ServiceAccountJWT") - require.Equal(authMethod.Config["ServiceAccountJWT"], 
updatedJWTToken) - }) - } -} + // First, create an auth method using the defaults + responseCode := cmd.Run([]string{ + "-timeout=1m", + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-connect-inject", + "-acl-binding-rule-selector=" + bindingRuleSelector, + }) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) -// Test that ACL binding rules are updated if the rule selector changes. -func TestRun_BindingRuleUpdates(tt *testing.T) { - tt.Parallel() + // Check that the auth method was created. + bootToken := getBootToken(t, k8s, resourcePrefix, ns) + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + }) + require.NoError(t, err) + authMethodName := resourcePrefix + "-k8s-auth-method" + authMethod, _, err := consul.ACL().AuthMethodRead(authMethodName, + &api.QueryOptions{Token: bootToken}) + require.NoError(t, err) + require.NotNil(t, authMethod) + require.Contains(t, authMethod.Config, "Host") + require.Equal(t, authMethod.Config["Host"], defaultKubernetesHost) + require.Contains(t, authMethod.Config, "CACert") + require.Equal(t, authMethod.Config["CACert"], caCert) + require.Contains(t, authMethod.Config, "ServiceAccountJWT") + require.Equal(t, authMethod.Config["ServiceAccountJWT"], jwtToken) + + // Generate a new CA certificate + _, _, caCertPem, _, err := cert.GenerateCA("kubernetes") + require.NoError(t, err) - // Test with deprecated -create-inject-auth-method flag. - cases := []string{"-create-inject-auth-method", "-create-inject-token"} - for _, flag := range cases { - tt.Run(flag, func(t *testing.T) { - k8s, testSvr := completeSetup(t) - setUpK8sServiceAccount(t, k8s, ns) - defer testSvr.Stop() - require := require.New(t) + // Overwrite the default kubernetes api, service account token and CA cert + kubernetesHost := "https://kubernetes.example.com" + // This token is the base64 encoded example token from jwt.io + serviceAccountToken = "ZXlKaGJHY2lPaUpJVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SnpkV0lpT2lJeE1qTTBOVFkzT0Rrd0lpd2libUZ0WlNJNklrcHZhRzRnUkc5bElpd2lhV0YwSWpveE5URTJNak01TURJeWZRLlNmbEt4d1JKU01lS0tGMlFUNGZ3cE1lSmYzNlBPazZ5SlZfYWRRc3N3NWM=" + serviceAccountCACert = base64.StdEncoding.EncodeToString([]byte(caCertPem)) - consul, err := api.NewClient(&api.Config{ - Address: testSvr.HTTPAddr, - }) - require.NoError(err) + // Create a new service account + updatedCACert, updatedJWTToken := setUpK8sServiceAccount(t, k8s, ns) - ui := cli.NewMockUi() - commonArgs := []string{ - "-resource-prefix=" + resourcePrefix, - "-k8s-namespace=" + ns, - "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], - "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], - flag, - } - firstRunArgs := append(commonArgs, - "-acl-binding-rule-selector=serviceaccount.name!=default", - ) - // On the second run, we change the binding rule selector. - secondRunArgs := append(commonArgs, - "-acl-binding-rule-selector=serviceaccount.name!=changed", - ) - - // Run the command first to populate the binding rule. - cmd := Command{ - UI: ui, - clientset: k8s, - } - responseCode := cmd.Run(firstRunArgs) - require.Equal(0, responseCode, ui.ErrorWriter.String()) - - // Validate the binding rule. 
- { - queryOpts := &api.QueryOptions{Token: getBootToken(t, k8s, resourcePrefix, ns)} - authMethodName := resourcePrefix + "-k8s-auth-method" - rules, _, err := consul.ACL().BindingRuleList(authMethodName, queryOpts) - require.NoError(err) - require.Len(rules, 1) - actRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, queryOpts) - require.NoError(err) - require.NotNil(actRule) - require.Equal("Kubernetes binding rule", actRule.Description) - require.Equal(api.BindingRuleBindTypeService, actRule.BindType) - require.Equal("${serviceaccount.name}", actRule.BindName) - require.Equal("serviceaccount.name!=default", actRule.Selector) - } + // Run command again + responseCode = cmd.Run([]string{ + "-timeout=1m", + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-acl-binding-rule-selector=" + bindingRuleSelector, + "-connect-inject", + "-auth-method-host=" + kubernetesHost, + }) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) - // Re-run the command with namespace flags. The policies should be updated. - // NOTE: We're redefining the command so that the old flag values are - // reset. - cmd = Command{ - UI: ui, - clientset: k8s, - } - responseCode = cmd.Run(secondRunArgs) - require.Equal(0, responseCode, ui.ErrorWriter.String()) - - // Check the binding rule is changed expected. - { - queryOpts := &api.QueryOptions{Token: getBootToken(t, k8s, resourcePrefix, ns)} - authMethodName := resourcePrefix + "-k8s-auth-method" - rules, _, err := consul.ACL().BindingRuleList(authMethodName, queryOpts) - require.NoError(err) - require.Len(rules, 1) - actRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, queryOpts) - require.NoError(err) - require.NotNil(actRule) - require.Equal("Kubernetes binding rule", actRule.Description) - require.Equal(api.BindingRuleBindTypeService, actRule.BindType) - require.Equal("${serviceaccount.name}", actRule.BindName) - require.Equal("serviceaccount.name!=changed", actRule.Selector) - } - }) - } + // Check that the auth method has been updated + authMethod, _, err = consul.ACL().AuthMethodRead(authMethodName, + &api.QueryOptions{Token: bootToken}) + require.NoError(t, err) + require.NotNil(t, authMethod) + require.Contains(t, authMethod.Config, "Host") + require.Equal(t, authMethod.Config["Host"], kubernetesHost) + require.Contains(t, authMethod.Config, "CACert") + require.Equal(t, authMethod.Config["CACert"], updatedCACert) + require.Contains(t, authMethod.Config, "ServiceAccountJWT") + require.Equal(t, authMethod.Config["ServiceAccountJWT"], updatedJWTToken) } -// Test that the catalog sync policy is updated if the Consul node name changes. -func TestRun_SyncPolicyUpdates(t *testing.T) { - t.Parallel() +// Test that ACL binding rules are updated if the rule selector changes. 
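The assertions above pin down the shape of what the command must produce for connect injection: a kubernetes-type auth method whose Config carries Host, CACert and ServiceAccountJWT, plus a binding rule with bind type "service", bind name "${serviceaccount.name}" and the selector passed via -acl-binding-rule-selector. As a rough sketch of writing those two objects through the Consul API (createConnectInjectAuthMethod and its arguments are placeholders; the real command also handles the update path when the objects already exist):

package aclsketch

import "github.com/hashicorp/consul/api"

// createConnectInjectAuthMethod writes an auth method and binding rule of the
// shape asserted in the tests above. caCert, saJWT and selector are placeholders.
func createConnectInjectAuthMethod(client *api.Client, resourcePrefix, caCert, saJWT, selector string) error {
	method := &api.ACLAuthMethod{
		Name: resourcePrefix + "-k8s-auth-method",
		Type: "kubernetes",
		Config: map[string]interface{}{
			"Host":              "https://kubernetes.default.svc",
			"CACert":            caCert,
			"ServiceAccountJWT": saJWT,
		},
	}
	if _, _, err := client.ACL().AuthMethodCreate(method, nil); err != nil {
		return err
	}
	rule := &api.ACLBindingRule{
		Description: "Kubernetes binding rule",
		AuthMethod:  method.Name,
		BindType:    api.BindingRuleBindTypeService,
		BindName:    "${serviceaccount.name}",
		Selector:    selector, // e.g. "serviceaccount.name!=default"
	}
	_, _, err := client.ACL().BindingRuleCreate(rule, nil)
	return err
}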
+func TestRun_BindingRuleUpdates(t *testing.T) { k8s, testSvr := completeSetup(t) + setUpK8sServiceAccount(t, k8s, ns) defer testSvr.Stop() - require := require.New(t) - ui := cli.NewMockUi() + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + }) + require.NoError(t, err) + + ui := cli.NewMockUi() + commonArgs := []string{ + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-connect-inject", + } + firstRunArgs := append(commonArgs, + "-acl-binding-rule-selector=serviceaccount.name!=default", + ) + // On the second run, we change the binding rule selector. + secondRunArgs := append(commonArgs, + "-acl-binding-rule-selector=serviceaccount.name!=changed", + ) + + // Run the command first to populate the binding rule. + cmd := Command{ + UI: ui, + clientset: k8s, + } + responseCode := cmd.Run(firstRunArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + // Validate the binding rule. + { + queryOpts := &api.QueryOptions{Token: getBootToken(t, k8s, resourcePrefix, ns)} + authMethodName := resourcePrefix + "-k8s-auth-method" + rules, _, err := consul.ACL().BindingRuleList(authMethodName, queryOpts) + require.NoError(t, err) + require.Len(t, rules, 1) + actRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, queryOpts) + require.NoError(t, err) + require.NotNil(t, actRule) + require.Equal(t, "Kubernetes binding rule", actRule.Description) + require.Equal(t, api.BindingRuleBindTypeService, actRule.BindType) + require.Equal(t, "${serviceaccount.name}", actRule.BindName) + require.Equal(t, "serviceaccount.name!=default", actRule.Selector) + } + + // Re-run the command with namespace flags. The policies should be updated. + // NOTE: We're redefining the command so that the old flag values are + // reset. + cmd = Command{ + UI: ui, + clientset: k8s, + } + responseCode = cmd.Run(secondRunArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + // Check the binding rule is changed expected. + { + queryOpts := &api.QueryOptions{Token: getBootToken(t, k8s, resourcePrefix, ns)} + authMethodName := resourcePrefix + "-k8s-auth-method" + rules, _, err := consul.ACL().BindingRuleList(authMethodName, queryOpts) + require.NoError(t, err) + require.Len(t, rules, 1) + actRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, queryOpts) + require.NoError(t, err) + require.NotNil(t, actRule) + require.Equal(t, "Kubernetes binding rule", actRule.Description) + require.Equal(t, api.BindingRuleBindTypeService, actRule.BindType) + require.Equal(t, "${serviceaccount.name}", actRule.BindName) + require.Equal(t, "serviceaccount.name!=changed", actRule.Selector) + } +} + +// Test that the catalog sync policy is updated if the Consul node name changes. 
+func TestRun_SyncPolicyUpdates(t *testing.T) { + t.Parallel() + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() + setUpK8sServiceAccount(t, k8s, ns) + + ui := cli.NewMockUi() commonArgs := []string{ "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], - "-create-sync-token", + "-sync-catalog", } firstRunArgs := append(commonArgs, "-sync-consul-node-name=k8s-sync", @@ -1141,7 +859,7 @@ func TestRun_SyncPolicyUpdates(t *testing.T) { clientset: k8s, } responseCode := cmd.Run(firstRunArgs) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) // Create consul client bootToken := getBootToken(t, k8s, resourcePrefix, ns) @@ -1149,19 +867,19 @@ func TestRun_SyncPolicyUpdates(t *testing.T) { Address: testSvr.HTTPAddr, Token: bootToken, }) - require.NoError(err) + require.NoError(t, err) // Get and check the sync policy details firstPolicies, _, err := consul.ACL().PolicyList(nil) - require.NoError(err) + require.NoError(t, err) for _, p := range firstPolicies { - if p.Name == "catalog-sync-token" { + if p.Name == "sync-catalog-policy" { policy, _, err := consul.ACL().PolicyRead(p.ID, nil) - require.NoError(err) + require.NoError(t, err) // Check the node name in the policy - require.Contains(policy.Rules, "k8s-sync") + require.Contains(t, policy.Rules, "k8s-sync") } } @@ -1172,19 +890,19 @@ func TestRun_SyncPolicyUpdates(t *testing.T) { clientset: k8s, } responseCode = cmd.Run(secondRunArgs) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) // Get and check the sync policy details secondPolicies, _, err := consul.ACL().PolicyList(nil) - require.NoError(err) + require.NoError(t, err) for _, p := range secondPolicies { - if p.Name == "catalog-sync-token" { + if p.Name == "sync-catalog-policy" { policy, _, err := consul.ACL().PolicyRead(p.ID, nil) - require.NoError(err) + require.NoError(t, err) // Check the node name in the policy - require.Contains(policy.Rules, "new-node-name") + require.Contains(t, policy.Rules, "new-node-name") } } } @@ -1197,7 +915,6 @@ func TestRun_SyncPolicyUpdates(t *testing.T) { // that we try to update will work for testing. func TestRun_ErrorsOnDuplicateACLPolicy(t *testing.T) { t.Parallel() - require := require.New(t) // Create Consul with ACLs already bootstrapped so that we can // then seed it with our manually created policy. @@ -1211,15 +928,15 @@ func TestRun_ErrorsOnDuplicateACLPolicy(t *testing.T) { Address: testAgent.HTTPAddr, Token: bootToken, }) - require.NoError(err) + require.NoError(t, err) // Create the policy manually. description := "not the expected description" policy, _, err := consul.ACL().PolicyCreate(&api.ACLPolicy{ - Name: "catalog-sync-token", + Name: "sync-catalog-policy", Description: description, }, nil) - require.NoError(err) + require.NoError(t, err) // Run the command. ui := cli.NewMockUi() @@ -1235,29 +952,28 @@ func TestRun_ErrorsOnDuplicateACLPolicy(t *testing.T) { "-k8s-namespace=" + ns, "-server-address", strings.Split(testAgent.HTTPAddr, ":")[0], "-server-port", strings.Split(testAgent.HTTPAddr, ":")[1], - "-create-sync-token", + "-sync-catalog", } responseCode := cmd.Run(cmdArgs) // We expect the command to time out. 
- require.Equal(1, responseCode) + require.Equal(t, 1, responseCode) // NOTE: Since the error is logged through the logger instead of the UI // there's no good way to test that we logged the expected error however // we also test this directly in create_or_update_test.go. // Check that the policy wasn't modified. rereadPolicy, _, err := consul.ACL().PolicyRead(policy.ID, nil) - require.NoError(err) - require.Equal(description, rereadPolicy.Description) + require.NoError(t, err) + require.Equal(t, description, rereadPolicy.Description) } // Test that if the servers aren't available at first that bootstrap // still succeeds. func TestRun_DelayedServers(t *testing.T) { t.Parallel() - require := require.New(t) k8s := fake.NewSimpleClientset() - + setUpK8sServiceAccount(t, k8s, ns) randomPorts := freeport.GetN(t, 6) ui := cli.NewMockUi() @@ -1303,7 +1019,7 @@ func TestRun_DelayedServers(t *testing.T) { Server: randomPorts[5], } }) - require.NoError(err) + require.NoError(t, err) close(testServerReady) }() @@ -1312,15 +1028,15 @@ func TestRun_DelayedServers(t *testing.T) { case <-testServerReady: defer srv.Stop() case <-time.After(5 * time.Second): - require.FailNow("test server took longer than 5s to come up") + require.FailNow(t, "test server took longer than 5s to come up") } // Wait for the command to exit. select { case <-done: - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) case <-time.After(5 * time.Second): - require.FailNow("command did not exit after 5s") + require.FailNow(t, "command did not exit after 5s") } // Test that the bootstrap kube secret is created. @@ -1331,10 +1047,10 @@ func TestRun_DelayedServers(t *testing.T) { Address: srv.HTTPAddr, Token: bootToken, }) - require.NoError(err) + require.NoError(t, err) tokenData, _, err := consul.ACL().TokenReadSelf(nil) - require.NoError(err) - require.Equal("global-management", tokenData.Policies[0].Name) + require.NoError(t, err) + require.Equal(t, "global-management", tokenData.Policies[0].Name) // Check that the agent policy was created. policyExists(t, "agent-token", consul) @@ -1343,8 +1059,8 @@ func TestRun_DelayedServers(t *testing.T) { // Test that if there's no leader, we retry until one is elected. func TestRun_NoLeader(t *testing.T) { t.Parallel() - require := require.New(t) k8s := fake.NewSimpleClientset() + setUpK8sServiceAccount(t, k8s, ns) type APICall struct { Method string @@ -1376,14 +1092,33 @@ func TestRun_NoLeader(t *testing.T) { fmt.Fprintln(w, `{"Config": {"Datacenter": "dc1", "PrimaryDatacenter": "dc1"}}`) case "/v1/acl/tokens": fmt.Fprintln(w, `[]`) + case "/v1/acl/token": + fmt.Fprintln(w, `{}`) + case "/v1/agent/token/agent": + fmt.Fprintln(w, `{}`) + case "/v1/acl/policy": + fmt.Fprintln(w, `{}`) + case "/v1/acl/auth-method": + fmt.Fprintln(w, `{}`) + case "/v1/acl/role": + fmt.Fprintln(w, `{}`) + case "/v1/acl/role/name/": + w.WriteHeader(404) + case "/v1/acl/role/name/release-name-consul-client-acl-role": + w.WriteHeader(404) + case "/v1/acl/binding-rules": + fmt.Fprintln(w, `[]`) + case "/v1/acl/binding-rule": + fmt.Fprintln(w, `{}`) default: - fmt.Fprintln(w, "{}") + w.WriteHeader(500) + fmt.Fprintln(w, "Mock Server not configured for this route: "+r.URL.Path) } })) defer consulServer.Close() serverURL, err := url.Parse(consulServer.URL) - require.NoError(err) + require.NoError(t, err) // Run the command. 
ui := cli.NewMockUi() @@ -1407,16 +1142,16 @@ func TestRun_NoLeader(t *testing.T) { select { case <-done: - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) case <-time.After(5 * time.Second): - require.FailNow("command did not complete within 5s") + require.FailNow(t, "command did not complete within 5s") } // Test that the bootstrap kube secret is created. getBootToken(t, k8s, resourcePrefix, ns) // Test that the expected API calls were made. - require.Equal([]APICall{ + require.Equal(t, []APICall{ // Bootstrap will have been called 3 times. { "PUT", @@ -1450,13 +1185,29 @@ func TestRun_NoLeader(t *testing.T) { "GET", "/v1/agent/self", }, + { + "PUT", + "/v1/acl/auth-method", + }, { "PUT", "/v1/acl/policy", }, + { + "GET", + "/v1/acl/role/name/release-name-consul-client-acl-role", + }, { "PUT", - "/v1/acl/token", + "/v1/acl/role", + }, + { + "GET", + "/v1/acl/binding-rules", + }, + { + "PUT", + "/v1/acl/binding-rule", }, }, consulAPICalls) } @@ -1558,11 +1309,11 @@ func TestConsulDatacenterList(t *testing.T) { } } -// Test that if creating client tokens fails at first, we retry. -func TestRun_ClientTokensRetry(t *testing.T) { +// Test that if creating client policy and binding rule fails at first, we retry. +func TestRun_ClientPolicyAndBindingRuleRetry(t *testing.T) { t.Parallel() - require := require.New(t) k8s := fake.NewSimpleClientset() + setUpK8sServiceAccount(t, k8s, ns) type APICall struct { Method string @@ -1594,14 +1345,33 @@ func TestRun_ClientTokensRetry(t *testing.T) { fmt.Fprintln(w, `{"Config": {"Datacenter": "dc1", "PrimaryDatacenter": "dc1"}}`) case "/v1/acl/tokens": fmt.Fprintln(w, `[]`) + case "/v1/acl/token": + fmt.Fprintln(w, `{}`) + case "/v1/acl/bootstrap": + fmt.Fprintln(w, `{}`) + case "/v1/agent/token/agent": + fmt.Fprintln(w, `{}`) + case "/v1/acl/auth-method": + fmt.Fprintln(w, `{}`) + case "/v1/acl/role": + fmt.Fprintln(w, `{}`) + case "/v1/acl/role/name/": + w.WriteHeader(404) + case "/v1/acl/role/name/release-name-consul-client-acl-role": + w.WriteHeader(404) + case "/v1/acl/binding-rules": + fmt.Fprintln(w, `[]`) + case "/v1/acl/binding-rule": + fmt.Fprintln(w, `{}`) default: - fmt.Fprintln(w, "{}") + w.WriteHeader(500) + fmt.Fprintln(w, "Mock Server not configured for this route: "+r.URL.Path) } })) defer consulServer.Close() serverURL, err := url.Parse(consulServer.URL) - require.NoError(err) + require.NoError(t, err) // Run the command. ui := cli.NewMockUi() @@ -1616,10 +1386,10 @@ func TestRun_ClientTokensRetry(t *testing.T) { "-server-address=" + serverURL.Hostname(), "-server-port=" + serverURL.Port(), }) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) // Test that the expected API calls were made. - require.Equal([]APICall{ + require.Equal(t, []APICall{ { "PUT", "/v1/acl/bootstrap", @@ -1644,6 +1414,10 @@ func TestRun_ClientTokensRetry(t *testing.T) { "GET", "/v1/agent/self", }, + { + "PUT", + "/v1/acl/auth-method", + }, // This call should happen twice since the first will fail. { "PUT", @@ -1653,9 +1427,21 @@ func TestRun_ClientTokensRetry(t *testing.T) { "PUT", "/v1/acl/policy", }, + { + "GET", + "/v1/acl/role/name/release-name-consul-client-acl-role", + }, { "PUT", - "/v1/acl/token", + "/v1/acl/role", + }, + { + "GET", + "/v1/acl/binding-rules", + }, + { + "PUT", + "/v1/acl/binding-rule", }, }, consulAPICalls) } @@ -1664,15 +1450,286 @@ func TestRun_ClientTokensRetry(t *testing.T) { // server tokens. 
func TestRun_AlreadyBootstrapped(t *testing.T) { t.Parallel() - require := require.New(t) + cases := map[string]bool{ + "token saved in k8s secret": true, + "token provided via file": false, + } + + for name, tokenFromK8sSecret := range cases { + t.Run(name, func(t *testing.T) { + k8s := fake.NewSimpleClientset() + + type APICall struct { + Method string + Path string + } + var consulAPICalls []APICall + + // Start the Consul server. + consulServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Record all the API calls made. + consulAPICalls = append(consulAPICalls, APICall{ + Method: r.Method, + Path: r.URL.Path, + }) + switch r.URL.Path { + case "/v1/agent/self": + fmt.Fprintln(w, `{"Config": {"Datacenter": "dc1", "PrimaryDatacenter": "dc1"}}`) + case "/v1/acl/tokens": + fmt.Fprintln(w, `[]`) + case "/v1/acl/token": + fmt.Fprintln(w, `{}`) + case "/v1/acl/policy": + fmt.Fprintln(w, `{}`) + case "/v1/agent/token/acl_agent_token": + fmt.Fprintln(w, `{}`) + case "/v1/acl/auth-method": + fmt.Fprintln(w, `{}`) + case "/v1/acl/role/name/release-name-consul-client-acl-role": + w.WriteHeader(404) + case "/v1/acl/role": + fmt.Fprintln(w, `{}`) + case "/v1/acl/binding-rules": + fmt.Fprintln(w, `[]`) + case "/v1/acl/binding-rule": + fmt.Fprintln(w, `{}`) + default: + w.WriteHeader(500) + fmt.Fprintln(w, "Mock Server not configured for this route: "+r.URL.Path) + } + })) + defer consulServer.Close() + + serverURL, err := url.Parse(consulServer.URL) + require.NoError(t, err) + setUpK8sServiceAccount(t, k8s, ns) + + cmdArgs := []string{ + "-timeout=500ms", + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-server-address=" + serverURL.Hostname(), + "-server-port=" + serverURL.Port(), + } + + // Create the bootstrap secret. + if tokenFromK8sSecret { + _, err = k8s.CoreV1().Secrets(ns).Create( + context.Background(), + &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourcePrefix + "-bootstrap-acl-token", + Labels: map[string]string{common.CLILabelKey: common.CLILabelValue}, + }, + Data: map[string][]byte{ + "token": []byte("old-token"), + }, + }, + metav1.CreateOptions{}) + require.NoError(t, err) + } else { + // Write token to a file. + bootTokenFile, err := ioutil.TempFile("", "") + require.NoError(t, err) + defer os.Remove(bootTokenFile.Name()) + + _, err = bootTokenFile.WriteString("old-token") + require.NoError(t, err) + + require.NoError(t, err) + cmdArgs = append(cmdArgs, "-bootstrap-token-file", bootTokenFile.Name()) + } + + // Run the command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + + responseCode := cmd.Run(cmdArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + // Test that the Secret is the same. + if tokenFromK8sSecret { + secret, err := k8s.CoreV1().Secrets(ns).Get(context.Background(), resourcePrefix+"-bootstrap-acl-token", metav1.GetOptions{}) + require.NoError(t, err) + require.Contains(t, secret.Data, "token") + require.Equal(t, "old-token", string(secret.Data["token"])) + } + + // Test that the expected API calls were made. + require.Equal(t, []APICall{ + // We expect calls for updating the server policy, setting server tokens, + // and updating client policy. 
+ { + "PUT", + "/v1/acl/policy", + }, + { + "GET", + "/v1/acl/tokens", + }, + { + "PUT", + "/v1/acl/token", + }, + { + "PUT", + "/v1/agent/token/agent", + }, + { + "PUT", + "/v1/agent/token/acl_agent_token", + }, + { + "GET", + "/v1/agent/self", + }, + { + "PUT", + "/v1/acl/auth-method", + }, + { + "PUT", + "/v1/acl/policy", + }, + { + "GET", + "/v1/acl/role/name/release-name-consul-client-acl-role", + }, + { + "PUT", + "/v1/acl/role", + }, + { + "GET", + "/v1/acl/binding-rules", + }, + { + "PUT", + "/v1/acl/binding-rule", + }, + }, consulAPICalls) + }) + } +} + +// Test if there is an old bootstrap Secret and the server token exists +// that we don't try and recreate the token. +func TestRun_AlreadyBootstrapped_ServerTokenExists(t *testing.T) { + t.Parallel() + cases := map[string]bool{ + "token saved in k8s secret": true, + "token provided via file": false, + } + + for name, tokenInK8sSecret := range cases { + t.Run(name, func(t *testing.T) { + + // First set everything up with ACLs bootstrapped. + bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + k8s, testAgent := completeBootstrappedSetup(t, bootToken) + setUpK8sServiceAccount(t, k8s, ns) + defer testAgent.Stop() + + cmdArgs := []string{ + "-timeout=1m", + "-k8s-namespace", ns, + "-server-address", strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port", strings.Split(testAgent.HTTPAddr, ":")[1], + "-resource-prefix", resourcePrefix, + } + + if tokenInK8sSecret { + _, err := k8s.CoreV1().Secrets(ns).Create(context.Background(), &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourcePrefix + "-bootstrap-acl-token", + }, + Data: map[string][]byte{ + "token": []byte(bootToken), + }, + }, metav1.CreateOptions{}) + require.NoError(t, err) + } else { + // Write token to a file. + bootTokenFile, err := ioutil.TempFile("", "") + require.NoError(t, err) + defer os.Remove(bootTokenFile.Name()) + + _, err = bootTokenFile.WriteString(bootToken) + require.NoError(t, err) + + require.NoError(t, err) + cmdArgs = append(cmdArgs, "-bootstrap-token-file", bootTokenFile.Name()) + } + + consulClient, err := api.NewClient(&api.Config{ + Address: testAgent.HTTPAddr, + Token: bootToken, + }) + require.NoError(t, err) + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + + // Create the server policy and token _before_ we run the command. + agentPolicyRules, err := cmd.agentRules() + require.NoError(t, err) + policy, _, err := consulClient.ACL().PolicyCreate(&api.ACLPolicy{ + Name: "agent-token", + Description: "Agent Token Policy", + Rules: agentPolicyRules, + }, nil) + require.NoError(t, err) + _, _, err = consulClient.ACL().TokenCreate(&api.ACLToken{ + Description: fmt.Sprintf("Server Token for %s", strings.Split(testAgent.HTTPAddr, ":")[0]), + Policies: []*api.ACLTokenPolicyLink{ + { + Name: policy.Name, + }, + }, + }, nil) + require.NoError(t, err) + + // Run the command. + responseCode := cmd.Run(cmdArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + // Check that only one server token exists, i.e. it didn't create an + // extra token. + tokens, _, err := consulClient.ACL().TokenList(nil) + require.NoError(t, err) + count := 0 + for _, token := range tokens { + if len(token.Policies) == 1 && token.Policies[0].Name == policy.Name { + count++ + } + } + require.Equal(t, 1, count) + }) + } +} + +// Test if -set-server-tokens is false (i.e. servers are disabled), we skip bootstrapping of the servers +// and continue on to the next step. 
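The call sequence asserted above maps onto the per-component ACL setup the command now performs: PUT /v1/acl/auth-method (the component auth method), PUT /v1/acl/policy, GET /v1/acl/role/name/<role> followed by PUT /v1/acl/role when it is missing, then GET /v1/acl/binding-rules and PUT /v1/acl/binding-rule. A condensed sketch of that sequence for one component with the Go API client (setUpComponentACLs and the selector are placeholders, not the command's actual helpers; the rule binds a role rather than a service because the tests later compare the rule's BindName against the role name):

package aclsketch

import "github.com/hashicorp/consul/api"

// setUpComponentACLs sketches the policy -> role -> binding-rule sequence that
// produces the HTTP calls listed above.
func setUpComponentACLs(client *api.Client, resourcePrefix, component, rules string) error {
	authMethodName := resourcePrefix + "-k8s-component-auth-method"

	// PUT /v1/acl/policy
	policy, _, err := client.ACL().PolicyCreate(&api.ACLPolicy{
		Name:  component + "-policy",
		Rules: rules,
	}, nil)
	if err != nil {
		return err
	}

	// GET /v1/acl/role/name/<role> (404 while absent), then PUT /v1/acl/role.
	roleName := resourcePrefix + "-" + component + "-acl-role"
	role, _, err := client.ACL().RoleReadByName(roleName, nil)
	if err != nil {
		return err
	}
	if role == nil {
		if _, _, err := client.ACL().RoleCreate(&api.ACLRole{
			Name:     roleName,
			Policies: []*api.ACLRolePolicyLink{{Name: policy.Name}},
		}, nil); err != nil {
			return err
		}
	}

	// GET /v1/acl/binding-rules to see whether the role is already bound.
	existing, _, err := client.ACL().BindingRuleList(authMethodName, nil)
	if err != nil {
		return err
	}
	for _, r := range existing {
		if r.BindName == roleName {
			return nil // already bound
		}
	}

	// PUT /v1/acl/binding-rule
	_, _, err = client.ACL().BindingRuleCreate(&api.ACLBindingRule{
		AuthMethod: authMethodName,
		BindType:   api.BindingRuleBindTypeRole,
		BindName:   roleName,
		Selector:   "serviceaccount.name==" + component, // placeholder selector
	}, nil)
	return err
}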
+func TestRun_SkipBootstrapping_WhenServersAreDisabled(t *testing.T) { + t.Parallel() k8s := fake.NewSimpleClientset() + bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + tokenFile := common.WriteTempFile(t, bootToken) + type APICall struct { Method string Path string } var consulAPICalls []APICall + setUpK8sServiceAccount(t, k8s, ns) + // Start the Consul server. consulServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Record all the API calls made. @@ -1683,8 +1740,6 @@ func TestRun_AlreadyBootstrapped(t *testing.T) { switch r.URL.Path { case "/v1/agent/self": fmt.Fprintln(w, `{"Config": {"Datacenter": "dc1", "PrimaryDatacenter": "dc1"}}`) - case "/v1/acl/tokens": - fmt.Fprintln(w, `[]`) default: // Send an empty JSON response with code 200 to all calls. fmt.Fprintln(w, "{}") @@ -1693,22 +1748,7 @@ func TestRun_AlreadyBootstrapped(t *testing.T) { defer consulServer.Close() serverURL, err := url.Parse(consulServer.URL) - require.NoError(err) - - // Create the bootstrap secret. - _, err = k8s.CoreV1().Secrets(ns).Create( - context.Background(), - &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourcePrefix + "-bootstrap-acl-token", - Labels: map[string]string{common.CLILabelKey: common.CLILabelValue}, - }, - Data: map[string][]byte{ - "token": []byte("old-token"), - }, - }, - metav1.CreateOptions{}) - require.NoError(err) + require.NoError(t, err) // Run the command. ui := cli.NewMockUi() @@ -1716,193 +1756,29 @@ func TestRun_AlreadyBootstrapped(t *testing.T) { UI: ui, clientset: k8s, } - responseCode := cmd.Run([]string{ "-timeout=500ms", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, "-server-address=" + serverURL.Hostname(), "-server-port=" + serverURL.Port(), + "-bootstrap-token-file=" + tokenFile, + "-set-server-tokens=false", + "-client=false", // disable client token, so there are fewer calls }) - require.Equal(0, responseCode, ui.ErrorWriter.String()) - - // Test that the Secret is the same. - secret, err := k8s.CoreV1().Secrets(ns).Get(context.Background(), resourcePrefix+"-bootstrap-acl-token", metav1.GetOptions{}) - require.NoError(err) - require.Contains(secret.Data, "token") - require.Equal("old-token", string(secret.Data["token"])) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) // Test that the expected API calls were made. - require.Equal([]APICall{ - // We expect calls for updating the server policy, setting server tokens, - // and updating client policy. - { - "PUT", - "/v1/acl/policy", - }, - { - "GET", - "/v1/acl/tokens", - }, - { - "PUT", - "/v1/acl/token", - }, - { - "PUT", - "/v1/agent/token/agent", - }, + // We expect not to see the call to /v1/acl/bootstrap. + require.Equal(t, []APICall{ + // We only expect the calls to get the datacenter { "GET", "/v1/agent/self", }, { "PUT", - "/v1/acl/policy", - }, - { - "PUT", - "/v1/acl/token", - }, - }, consulAPICalls) -} - -// Test if there is an old bootstrap Secret and the server token exists -// that we don't try and recreate the token. -func TestRun_AlreadyBootstrapped_ServerTokenExists(t *testing.T) { - t.Parallel() - require := require.New(t) - - // First set everything up with ACLs bootstrapped. 
- bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" - k8s, testAgent := completeBootstrappedSetup(t, bootToken) - setUpK8sServiceAccount(t, k8s, ns) - defer testAgent.Stop() - k8s.CoreV1().Secrets(ns).Create(context.Background(), &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourcePrefix + "-bootstrap-acl-token", - }, - Data: map[string][]byte{ - "token": []byte(bootToken), - }, - }, metav1.CreateOptions{}) - - consulClient, err := api.NewClient(&api.Config{ - Address: testAgent.HTTPAddr, - Token: bootToken, - }) - require.NoError(err) - ui := cli.NewMockUi() - cmd := Command{ - UI: ui, - clientset: k8s, - } - - // Create the server policy and token _before_ we run the command. - agentPolicyRules, err := cmd.agentRules() - require.NoError(err) - policy, _, err := consulClient.ACL().PolicyCreate(&api.ACLPolicy{ - Name: "agent-token", - Description: "Agent Token Policy", - Rules: agentPolicyRules, - }, nil) - require.NoError(err) - _, _, err = consulClient.ACL().TokenCreate(&api.ACLToken{ - Description: fmt.Sprintf("Server Token for %s", strings.Split(testAgent.HTTPAddr, ":")[0]), - Policies: []*api.ACLTokenPolicyLink{ - { - Name: policy.Name, - }, - }, - }, nil) - require.NoError(err) - - // Run the command. - cmdArgs := []string{ - "-timeout=1m", - "-k8s-namespace", ns, - "-server-address", strings.Split(testAgent.HTTPAddr, ":")[0], - "-server-port", strings.Split(testAgent.HTTPAddr, ":")[1], - "-resource-prefix", resourcePrefix, - } - - responseCode := cmd.Run(cmdArgs) - require.Equal(0, responseCode, ui.ErrorWriter.String()) - - // Check that only one server token exists, i.e. it didn't create an - // extra token. - tokens, _, err := consulClient.ACL().TokenList(nil) - require.NoError(err) - count := 0 - for _, token := range tokens { - if len(token.Policies) == 1 && token.Policies[0].Name == policy.Name { - count++ - } - } - require.Equal(1, count) -} - -// Test if there is a provided bootstrap we skip bootstrapping of the servers -// and continue on to the next step. -func TestRun_SkipBootstrapping_WhenBootstrapTokenIsProvided(t *testing.T) { - t.Parallel() - require := require.New(t) - k8s := fake.NewSimpleClientset() - - bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" - tokenFile := common.WriteTempFile(t, bootToken) - - type APICall struct { - Method string - Path string - } - var consulAPICalls []APICall - - // Start the Consul server. - consulServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Record all the API calls made. - consulAPICalls = append(consulAPICalls, APICall{ - Method: r.Method, - Path: r.URL.Path, - }) - switch r.URL.Path { - case "/v1/agent/self": - fmt.Fprintln(w, `{"Config": {"Datacenter": "dc1", "PrimaryDatacenter": "dc1"}}`) - default: - // Send an empty JSON response with code 200 to all calls. - fmt.Fprintln(w, "{}") - } - })) - defer consulServer.Close() - - serverURL, err := url.Parse(consulServer.URL) - require.NoError(err) - - // Run the command. - ui := cli.NewMockUi() - cmd := Command{ - UI: ui, - clientset: k8s, - } - - responseCode := cmd.Run([]string{ - "-timeout=500ms", - "-resource-prefix=" + resourcePrefix, - "-k8s-namespace=" + ns, - "-server-address=" + serverURL.Hostname(), - "-server-port=" + serverURL.Port(), - "-bootstrap-token-file=" + tokenFile, - "-create-client-token=false", // disable client token, so there are less calls - }) - require.Equal(0, responseCode, ui.ErrorWriter.String()) - - // Test that the expected API calls were made. 
- // We expect not to see the call to /v1/acl/bootstrap. - require.Equal([]APICall{ - // We only expect the calls to get the datacenter - { - "GET", - "/v1/agent/self", + "/v1/acl/auth-method", }, }, consulAPICalls) } @@ -1910,7 +1786,6 @@ func TestRun_SkipBootstrapping_WhenBootstrapTokenIsProvided(t *testing.T) { // Test that we exit after timeout. func TestRun_Timeout(t *testing.T) { t.Parallel() - require := require.New(t) k8s := fake.NewSimpleClientset() ui := cli.NewMockUi() cmd := Command{ @@ -1924,15 +1799,15 @@ func TestRun_Timeout(t *testing.T) { "-k8s-namespace=" + ns, "-server-address=foo", }) - require.Equal(1, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) } // Test that the bootstrapping process can make calls to Consul API over HTTPS // when the consul agent is configured with HTTPS. func TestRun_HTTPS(t *testing.T) { t.Parallel() - require := require.New(t) k8s := fake.NewSimpleClientset() + setUpK8sServiceAccount(t, k8s, ns) caFile, certFile, keyFile := test.GenerateServerCerts(t) @@ -1943,7 +1818,7 @@ func TestRun_HTTPS(t *testing.T) { c.CertFile = certFile c.KeyFile = keyFile }) - require.NoError(err) + require.NoError(t, err) defer srv.Stop() // Run the command. @@ -1963,15 +1838,15 @@ func TestRun_HTTPS(t *testing.T) { "-server-address=" + strings.Split(srv.HTTPSAddr, ":")[0], "-server-port=" + strings.Split(srv.HTTPSAddr, ":")[1], }) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) // Test that the bootstrap token is created to make sure the bootstrapping succeeded. // The presence of the bootstrap token tells us that the API calls to Consul have been successful. tokenSecret, err := k8s.CoreV1().Secrets(ns).Get(context.Background(), resourcePrefix+"-bootstrap-acl-token", metav1.GetOptions{}) - require.NoError(err) - require.NotNil(tokenSecret) + require.NoError(t, err) + require.NotNil(t, tokenSecret) _, ok := tokenSecret.Data["token"] - require.True(ok) + require.True(t, ok) } // Test that the ACL replication token created from the primary DC can be used @@ -1981,6 +1856,7 @@ func TestRun_ACLReplicationTokenValid(t *testing.T) { secondaryK8s, secondaryConsulClient, secondaryAddr, aclReplicationToken, clean := completeReplicatedSetup(t) defer clean() + setUpK8sServiceAccount(t, secondaryK8s, ns) // completeReplicatedSetup ran the command in our primary dc so now we // need to run the command in our secondary dc. @@ -1999,8 +1875,9 @@ func TestRun_ACLReplicationTokenValid(t *testing.T) { "-server-port", strings.Split(secondaryAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-acl-replication-token-file", tokenFile, - "-create-client-token", - "-create-mesh-gateway-token", + "-auth-method-host=" + "https://my-kube.com", + "-client", + "-mesh-gateway", } responseCode := secondaryCmd.Run(secondaryCmdArgs) require.Equal(t, 0, responseCode, secondaryUI.ErrorWriter.String()) @@ -2008,21 +1885,21 @@ func TestRun_ACLReplicationTokenValid(t *testing.T) { // Test that replication was successful. retry.Run(t, func(r *retry.R) { replicationStatus, _, err := secondaryConsulClient.ACL().Replication(nil) - require.NoError(t, err) - require.True(t, replicationStatus.Enabled) - require.Greater(t, replicationStatus.ReplicatedIndex, uint64(0)) + require.NoError(r, err) + require.True(r, replicationStatus.Enabled) + require.Greater(r, replicationStatus.ReplicatedIndex, uint64(0)) }) // Test that the client policy was created. 
retry.Run(t, func(r *retry.R) { - p := policyExists(r, "client-token-dc2", secondaryConsulClient) + p := policyExists(r, "client-policy-dc2", secondaryConsulClient) require.Equal(r, []string{"dc2"}, p.Datacenters) }) // Test that the mesh-gateway policy was created. This is a global policy // so replication has to have worked for it to exist. retry.Run(t, func(r *retry.R) { - p := policyExists(r, "mesh-gateway-token-dc2", secondaryConsulClient) + p := policyExists(r, "mesh-gateway-policy-dc2", secondaryConsulClient) require.Len(r, p.Datacenters, 0) }) } @@ -2030,7 +1907,7 @@ func TestRun_ACLReplicationTokenValid(t *testing.T) { // Test that if acl replication is enabled, we don't create an anonymous token policy. func TestRun_AnonPolicy_IgnoredWithReplication(t *testing.T) { // The anonymous policy is configured when one of these flags is set. - cases := []string{"-allow-dns", "-create-inject-auth-method"} + cases := []string{"-allow-dns", "-connect-inject"} for _, flag := range cases { t.Run(flag, func(t *testing.T) { bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" @@ -2076,7 +1953,7 @@ func TestRun_CloudAutoJoin(t *testing.T) { k8s, testSvr := completeSetup(t) defer testSvr.Stop() - require := require.New(t) + setUpK8sServiceAccount(t, k8s, ns) // create a mock provider // that always returns the server address @@ -2101,7 +1978,7 @@ func TestRun_CloudAutoJoin(t *testing.T) { "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], } responseCode := cmd.Run(args) - require.Equal(0, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) // check that the provider has been called provider.AssertNumberOfCalls(t, "Addrs", 1) @@ -2114,15 +1991,15 @@ func TestRun_CloudAutoJoin(t *testing.T) { Address: testSvr.HTTPAddr, Token: bootToken, }) - require.NoError(err) + require.NoError(t, err) tokenData, _, err := consul.ACL().TokenReadSelf(nil) - require.NoError(err) - require.Equal("global-management", tokenData.Policies[0].Name) + require.NoError(t, err) + require.Equal(t, "global-management", tokenData.Policies[0].Name) // Check that the agent policy was created. agentPolicy := policyExists(t, "agent-token", consul) // Should be a global policy. - require.Len(agentPolicy.Datacenters, 0) + require.Len(t, agentPolicy.Datacenters, 0) } func TestRun_GatewayErrors(t *testing.T) { @@ -2155,6 +2032,7 @@ func TestRun_GatewayErrors(t *testing.T) { k8s, testSvr := completeSetup(tt) defer testSvr.Stop() + setUpK8sServiceAccount(t, k8s, ns) require := require.New(tt) // Run the command. @@ -2177,6 +2055,711 @@ func TestRun_GatewayErrors(t *testing.T) { } } +// Test creating the correct ACL policies and Binding Rules for components in the primary datacenter. +// The test works by running the command and then ensuring that: +// * An ACLBindingRule exists which references the ACLRole. +// * An ACLRole exists and has the correct PolicyName in it's ACLPolicyLinkRule list. +// * The ACLPolicy exists. 
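Spelled out, the chain described in the comment above is: a binding rule on the component auth method whose BindName is the role, a role whose Policies list links the policy by name, and the policy itself. Condensed into a helper for orientation only (verifyComponentACLChain is hypothetical; the test below performs the same checks inline with require assertions):

package aclsketch

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// verifyComponentACLChain walks binding rule -> role -> policy and returns an
// error if any link in the chain is missing.
func verifyComponentACLChain(client *api.Client, authMethod, roleName, policyName string) error {
	policy, _, err := client.ACL().PolicyReadByName(policyName, nil)
	if err != nil {
		return err
	}
	if policy == nil {
		return fmt.Errorf("policy %q not found", policyName)
	}

	role, _, err := client.ACL().RoleReadByName(roleName, nil)
	if err != nil {
		return err
	}
	if role == nil {
		return fmt.Errorf("role %q not found", roleName)
	}
	linked := false
	for _, link := range role.Policies {
		if link.Name == policyName {
			linked = true
			break
		}
	}
	if !linked {
		return fmt.Errorf("role %q does not link policy %q", roleName, policyName)
	}

	rules, _, err := client.ACL().BindingRuleList(authMethod, nil)
	if err != nil {
		return err
	}
	for _, rule := range rules {
		if rule.BindName == roleName {
			return nil // chain is complete
		}
	}
	return fmt.Errorf("no binding rule on %q binds role %q", authMethod, roleName)
}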
+func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) { + t.Parallel() + + cases := []struct { + TestName string + TokenFlags []string + PolicyNames []string + Roles []string + }{ + { + TestName: "Controller", + TokenFlags: []string{"-controller"}, + PolicyNames: []string{"controller-policy"}, + Roles: []string{resourcePrefix + "-controller-acl-role"}, + }, + { + TestName: "Connect Inject", + TokenFlags: []string{"-connect-inject"}, + PolicyNames: []string{"connect-inject-policy"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role"}, + }, + { + TestName: "Sync Catalog", + TokenFlags: []string{"-sync-catalog"}, + PolicyNames: []string{"sync-catalog-policy"}, + Roles: []string{resourcePrefix + "-sync-catalog-acl-role"}, + }, + { + TestName: "API Gateway Controller", + TokenFlags: []string{"-api-gateway-controller"}, + PolicyNames: []string{"api-gateway-controller-policy"}, + Roles: []string{resourcePrefix + "-api-gateway-controller-acl-role"}, + }, + { + TestName: "Snapshot Agent", + TokenFlags: []string{"-snapshot-agent"}, + PolicyNames: []string{"snapshot-agent-policy"}, + Roles: []string{resourcePrefix + "-snapshot-agent-acl-role"}, + }, + { + TestName: "Mesh Gateway", + TokenFlags: []string{"-mesh-gateway"}, + PolicyNames: []string{"mesh-gateway-policy"}, + Roles: []string{resourcePrefix + "-mesh-gateway-acl-role"}, + }, + { + TestName: "Client", + TokenFlags: []string{"-client"}, + PolicyNames: []string{"client-policy"}, + Roles: []string{resourcePrefix + "-client-acl-role"}, + }, + { + TestName: "Terminating Gateway", + TokenFlags: []string{"-terminating-gateway-name=terminating", + "-terminating-gateway-name=gateway", + "-terminating-gateway-name=another-gateway"}, + PolicyNames: []string{resourcePrefix + "-terminating-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, + Roles: []string{resourcePrefix + "-terminating-acl-role", + resourcePrefix + "-gateway-acl-role", + resourcePrefix + "-another-gateway-acl-role"}, + }, + { + TestName: "Ingress Gateway", + TokenFlags: []string{"-ingress-gateway-name=ingress", + "-ingress-gateway-name=gateway", + "-ingress-gateway-name=another-gateway"}, + PolicyNames: []string{resourcePrefix + "-ingress-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, + Roles: []string{resourcePrefix + "-ingress-acl-role", + resourcePrefix + "-gateway-acl-role", + resourcePrefix + "-another-gateway-acl-role"}, + }, + } + for _, c := range cases { + t.Run(c.TestName, func(t *testing.T) { + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() + setUpK8sServiceAccount(t, k8s, ns) + + // Run the command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + cmdArgs := append([]string{ + "-timeout=500ms", + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + }, c.TokenFlags...) + cmd.init() + responseCode := cmd.Run(cmdArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + bootToken := getBootToken(t, k8s, resourcePrefix, ns) + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + Token: bootToken, + }) + require.NoError(t, err) + + // Check that the Role exists + has correct Policy and is associated with a BindingRule. + for i := range c.Roles { + // Check that the Policy exists. 
+ policy, _, err := consul.ACL().PolicyReadByName(c.PolicyNames[i], &api.QueryOptions{}) + require.NoError(t, err) + require.NotNil(t, policy) + + // Check that the Role exists. + role, _, err := consul.ACL().RoleReadByName(c.Roles[i], &api.QueryOptions{}) + require.NoError(t, err) + require.NotNil(t, role) + + // Check that the Role references the Policy. + found := false + for j := range role.Policies { + if role.Policies[j].Name == policy.Name { + found = true + break + } + } + require.True(t, found) + + // Check that there exists a BindingRule that references this Role. + rb, _, err := consul.ACL().BindingRuleList(fmt.Sprintf("%s-%s", resourcePrefix, componentAuthMethod), &api.QueryOptions{}) + require.NoError(t, err) + require.NotNil(t, rb) + found = false + for j := range rb { + if rb[j].BindName == c.Roles[i] { + found = true + break + } + } + require.True(t, found) + } + + }) + } +} + +// Test creating the correct ACL policies and Binding Rules for components in the secondary datacenter. +// This tests specifically tests that policies and roles for global tokens are correctly created. +// The test works by running the command and then ensuring that: +// * An ACLBindingRule exists which references the ACLRole. +// * An ACLRole exists and has the correct PolicyName in it's ACLPolicyLinkRule list. +// * The ACLPolicy exists. +func TestRun_PoliciesAndBindingRulesACLLogin_SecondaryDatacenter(t *testing.T) { + t.Parallel() + + const ( + secondaryDatacenter = "dc2" + primaryDatacenter = "dc1" + ) + cases := []struct { + TestName string + TokenFlags []string + PolicyNames []string + Roles []string + GlobalAuthMethod bool + }{ + { + TestName: "Controller", + TokenFlags: []string{"-controller"}, + PolicyNames: []string{"controller-policy-" + secondaryDatacenter}, + Roles: []string{resourcePrefix + "-controller-acl-role-" + secondaryDatacenter}, + GlobalAuthMethod: true, + }, + { + TestName: "Connect Inject", + TokenFlags: []string{"-connect-inject"}, + PolicyNames: []string{"connect-inject-policy-" + secondaryDatacenter}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role-" + secondaryDatacenter}, + GlobalAuthMethod: false, + }, + { + TestName: "Sync Catalog", + TokenFlags: []string{"-sync-catalog"}, + PolicyNames: []string{"sync-catalog-policy-" + secondaryDatacenter}, + Roles: []string{resourcePrefix + "-sync-catalog-acl-role-" + secondaryDatacenter}, + GlobalAuthMethod: false, + }, + { + TestName: "API Gateway Controller", + TokenFlags: []string{"-api-gateway-controller"}, + PolicyNames: []string{"api-gateway-controller-policy-" + secondaryDatacenter}, + Roles: []string{resourcePrefix + "-api-gateway-controller-acl-role-" + secondaryDatacenter}, + GlobalAuthMethod: false, + }, + { + TestName: "Snapshot Agent", + TokenFlags: []string{"-snapshot-agent"}, + PolicyNames: []string{"snapshot-agent-policy-" + secondaryDatacenter}, + Roles: []string{resourcePrefix + "-snapshot-agent-acl-role-" + secondaryDatacenter}, + GlobalAuthMethod: false, + }, + { + TestName: "Mesh Gateway", + TokenFlags: []string{"-mesh-gateway"}, + PolicyNames: []string{"mesh-gateway-policy-" + secondaryDatacenter}, + Roles: []string{resourcePrefix + "-mesh-gateway-acl-role-" + secondaryDatacenter}, + GlobalAuthMethod: true, + }, + { + TestName: "Client", + TokenFlags: []string{"-client"}, + PolicyNames: []string{"client-policy-" + secondaryDatacenter}, + Roles: []string{resourcePrefix + "-client-acl-role-" + secondaryDatacenter}, + GlobalAuthMethod: false, + }, + { + TestName: "Terminating Gateway", + 
TokenFlags: []string{"-terminating-gateway-name=terminating", + "-terminating-gateway-name=gateway", + "-terminating-gateway-name=another-gateway"}, + PolicyNames: []string{resourcePrefix + "-terminating-policy-" + secondaryDatacenter, + resourcePrefix + "-gateway-policy-" + secondaryDatacenter, + resourcePrefix + "-another-gateway-policy-" + secondaryDatacenter}, + Roles: []string{resourcePrefix + "-terminating-acl-role-" + secondaryDatacenter, + resourcePrefix + "-gateway-acl-role-" + secondaryDatacenter, + resourcePrefix + "-another-gateway-acl-role-" + secondaryDatacenter}, + GlobalAuthMethod: false, + }, + { + TestName: "Ingress Gateway", + TokenFlags: []string{"-ingress-gateway-name=ingress", + "-ingress-gateway-name=gateway", + "-ingress-gateway-name=another-gateway"}, + PolicyNames: []string{resourcePrefix + "-ingress-policy-" + secondaryDatacenter, + resourcePrefix + "-gateway-policy-" + secondaryDatacenter, + resourcePrefix + "-another-gateway-policy-" + secondaryDatacenter}, + Roles: []string{resourcePrefix + "-ingress-acl-role-" + secondaryDatacenter, + resourcePrefix + "-gateway-acl-role-" + secondaryDatacenter, + resourcePrefix + "-another-gateway-acl-role-" + secondaryDatacenter}, + GlobalAuthMethod: false, + }, + } + for _, c := range cases { + t.Run(c.TestName, func(t *testing.T) { + bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + tokenFile := common.WriteTempFile(t, bootToken) + k8s, consul, consulHTTPAddr, cleanup := mockReplicatedSetup(t, bootToken) + setUpK8sServiceAccount(t, k8s, ns) + defer cleanup() + + // Run the command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + cmdArgs := append([]string{ + "-federation", + "-timeout=1m", + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-auth-method-host=" + "https://my-kube.com", + "-acl-replication-token-file", tokenFile, + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], + }, c.TokenFlags...) + cmd.init() + responseCode := cmd.Run(cmdArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + datacenter := "" + if c.GlobalAuthMethod { + datacenter = primaryDatacenter + } + + retry.Run(t, func(r *retry.R) { + // Check that the Role exists + has correct Policy and is associated with a BindingRule. + for i := range c.Roles { + // Check that the Policy exists. + policy, _, err := consul.ACL().PolicyReadByName(c.PolicyNames[i], &api.QueryOptions{Datacenter: primaryDatacenter}) + require.NoError(r, err) + require.NotNil(r, policy) + + // Check that the Role exists. + role, _, err := consul.ACL().RoleReadByName(c.Roles[i], &api.QueryOptions{Datacenter: datacenter}) + require.NoError(r, err) + require.NotNil(r, role) + + // Check that the Role references the Policy. + found := false + for j := range role.Policies { + if role.Policies[j].Name == policy.Name { + found = true + break + } + } + require.True(r, found) + + // Check that there exists a BindingRule that references this Role. 
+ authMethodName := fmt.Sprintf("%s-%s", resourcePrefix, componentAuthMethod) + if c.GlobalAuthMethod { + authMethodName = fmt.Sprintf("%s-%s-%s", resourcePrefix, componentAuthMethod, secondaryDatacenter) + } + rb, _, err := consul.ACL().BindingRuleList(authMethodName, &api.QueryOptions{Datacenter: datacenter}) + require.NoError(r, err) + require.NotNil(r, rb) + found = false + for j := range rb { + if rb[j].BindName == c.Roles[i] { + found = true + break + } + } + require.True(r, found) + } + }) + }) + } +} + +// Test that server-acl-init in the primary datacenter creates the desired token. +// The test works by running the login command and then ensuring that the token +// returned has the correct role for the component. +func TestRun_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { + t.Parallel() + + cases := []struct { + ComponentName string + TokenFlags []string + Roles []string + ServiceAccountName string + GlobalToken bool + }{ + { + ComponentName: "controller", + TokenFlags: []string{"-controller"}, + Roles: []string{resourcePrefix + "-controller-acl-role"}, + GlobalToken: false, + }, + { + ComponentName: "connect-injector", + TokenFlags: []string{"-connect-inject"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role"}, + GlobalToken: false, + }, + { + ComponentName: "sync-catalog", + TokenFlags: []string{"-sync-catalog"}, + Roles: []string{resourcePrefix + "-sync-catalog-acl-role"}, + GlobalToken: false, + }, + { + ComponentName: "api-gateway-controller", + TokenFlags: []string{"-api-gateway-controller"}, + Roles: []string{resourcePrefix + "-api-gateway-controller-acl-role"}, + GlobalToken: false, + }, + { + ComponentName: "snapshot-agent", + TokenFlags: []string{"-snapshot-agent"}, + Roles: []string{resourcePrefix + "-snapshot-agent-acl-role"}, + GlobalToken: false, + }, + { + ComponentName: "mesh-gateway", + TokenFlags: []string{"-mesh-gateway"}, + Roles: []string{resourcePrefix + "-mesh-gateway-acl-role"}, + GlobalToken: false, + }, + { + ComponentName: "client", + TokenFlags: []string{"-client"}, + Roles: []string{resourcePrefix + "-client-acl-role"}, + GlobalToken: false, + }, + // We are only testing one terminating gateway here because we are just + // validating that we can issue a consul login for each component type. 
+ // Having multiple gateways is not necessary given that to make the + // login that occurs from the test here would require getting multiple + // tokens in the test fixture instead of the hardcoded + // serviceAccountToken we have in this file + { + ComponentName: "terminating-gateway", + TokenFlags: []string{"-terminating-gateway-name=terminating"}, + Roles: []string{resourcePrefix + "-terminating-acl-role"}, + ServiceAccountName: fmt.Sprintf("%s-%s", resourcePrefix, "terminating"), + GlobalToken: false, + }, + { + ComponentName: "ingress-gateway", + TokenFlags: []string{"-ingress-gateway-name=ingress"}, + Roles: []string{resourcePrefix + "-ingress-acl-role"}, + ServiceAccountName: fmt.Sprintf("%s-%s", resourcePrefix, "ingress"), + GlobalToken: false, + }, + } + for _, c := range cases { + t.Run(c.ComponentName, func(t *testing.T) { + authMethodName := fmt.Sprintf("%s-%s", resourcePrefix, componentAuthMethod) + serviceAccountName := fmt.Sprintf("%s-%s", resourcePrefix, c.ComponentName) + if len(c.ServiceAccountName) > 0 { + serviceAccountName = c.ServiceAccountName + } + + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() + _, jwtToken := setUpK8sServiceAccount(t, k8s, ns) + + k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("content-type", "application/json") + if r != nil && r.URL.Path == "/apis/authentication.k8s.io/v1/tokenreviews" && r.Method == "POST" { + w.Write([]byte(test.TokenReviewsResponse(serviceAccountName, ns))) + } + if r != nil && r.URL.Path == fmt.Sprintf("/api/v1/namespaces/%s/serviceaccounts/%s", ns, serviceAccountName) && + r.Method == "GET" { + w.Write([]byte(test.ServiceAccountGetResponse(serviceAccountName, ns))) + } + })) + t.Cleanup(k8sMockServer.Close) + + // Run the command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + cmdArgs := append([]string{ + "-timeout=500ms", + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-auth-method-host=" + k8sMockServer.URL, + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + }, c.TokenFlags...) + cmd.init() + responseCode := cmd.Run(cmdArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + client, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + }) + require.NoError(t, err) + + tok, _, err := client.ACL().Login(&api.ACLLoginParams{ + AuthMethod: authMethodName, + BearerToken: jwtToken, + Meta: map[string]string{}, + }, &api.WriteOptions{}) + require.NoError(t, err) + + require.Equal(t, len(tok.Roles), len(c.Roles)) + for _, role := range tok.Roles { + require.Contains(t, c.Roles, role.Name) + } + require.Equal(t, !c.GlobalToken, tok.Local) + }) + } +} + +// Test that server-acl-init in the secondary datacenter creates the desired token. +// The test works by running the login command and then ensuring that the token +// returned has the correct role for the component. 
+func TestRun_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { + t.Parallel() + + cases := []struct { + ComponentName string + TokenFlags []string + Roles []string + ServiceAccountName string + GlobalAuthMethod bool + GlobalToken bool + }{ + { + ComponentName: "controller", + TokenFlags: []string{"-controller"}, + Roles: []string{resourcePrefix + "-controller-acl-role-dc2"}, + GlobalAuthMethod: true, + GlobalToken: true, + }, + { + ComponentName: "connect-injector", + TokenFlags: []string{"-connect-inject"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role-dc2"}, + GlobalAuthMethod: false, + GlobalToken: false, + }, + { + ComponentName: "sync-catalog", + TokenFlags: []string{"-sync-catalog"}, + Roles: []string{resourcePrefix + "-sync-catalog-acl-role-dc2"}, + GlobalAuthMethod: false, + GlobalToken: false, + }, + { + ComponentName: "api-gateway-controller", + TokenFlags: []string{"-api-gateway-controller"}, + Roles: []string{resourcePrefix + "-api-gateway-controller-acl-role-dc2"}, + GlobalAuthMethod: false, + GlobalToken: false, + }, + { + ComponentName: "snapshot-agent", + TokenFlags: []string{"-snapshot-agent"}, + Roles: []string{resourcePrefix + "-snapshot-agent-acl-role-dc2"}, + GlobalAuthMethod: false, + GlobalToken: false, + }, + { + ComponentName: "mesh-gateway", + TokenFlags: []string{"-mesh-gateway"}, + Roles: []string{resourcePrefix + "-mesh-gateway-acl-role-dc2"}, + GlobalAuthMethod: true, + GlobalToken: true, + }, + { + ComponentName: "client", + TokenFlags: []string{"-client"}, + Roles: []string{resourcePrefix + "-client-acl-role-dc2"}, + GlobalAuthMethod: false, + GlobalToken: false, + }, + // We are only testing one terminating gateway here because we are just + // validating that we can issue a consul login for each component type. 
+ // Having multiple gateways is not necessary given that to make the + // login that occurs from the test here would require getting multiple + // tokens in the test fixture instead of the hardcoded + // serviceAccountToken we have in this file + { + ComponentName: "terminating-gateway", + TokenFlags: []string{"-terminating-gateway-name=terminating"}, + Roles: []string{resourcePrefix + "-terminating-acl-role-dc2"}, + ServiceAccountName: fmt.Sprintf("%s-%s", resourcePrefix, "terminating"), + GlobalAuthMethod: false, + GlobalToken: false, + }, + { + ComponentName: "ingress-gateway", + TokenFlags: []string{"-ingress-gateway-name=ingress"}, + Roles: []string{resourcePrefix + "-ingress-acl-role-dc2"}, + ServiceAccountName: fmt.Sprintf("%s-%s", resourcePrefix, "ingress"), + GlobalAuthMethod: false, + GlobalToken: false, + }, + } + for _, c := range cases { + t.Run(c.ComponentName, func(t *testing.T) { + bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + tokenFile := common.WriteTempFile(t, bootToken) + authMethodName := fmt.Sprintf("%s-%s", resourcePrefix, componentAuthMethod) + if c.GlobalAuthMethod { + authMethodName = fmt.Sprintf("%s-%s-%s", resourcePrefix, componentAuthMethod, "dc2") + } + serviceAccountName := fmt.Sprintf("%s-%s", resourcePrefix, c.ComponentName) + if len(c.ServiceAccountName) > 0 { + serviceAccountName = c.ServiceAccountName + } + + k8s, _, consulHTTPAddr, cleanup := mockReplicatedSetup(t, bootToken) + defer cleanup() + _, jwtToken := setUpK8sServiceAccount(t, k8s, ns) + + k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("content-type", "application/json") + if r != nil && r.URL.Path == "/apis/authentication.k8s.io/v1/tokenreviews" && r.Method == "POST" { + w.Write([]byte(test.TokenReviewsResponse(serviceAccountName, ns))) + } + if r != nil && r.URL.Path == fmt.Sprintf("/api/v1/namespaces/%s/serviceaccounts/%s", ns, serviceAccountName) && + r.Method == "GET" { + w.Write([]byte(test.ServiceAccountGetResponse(serviceAccountName, ns))) + } + })) + t.Cleanup(k8sMockServer.Close) + + // Run the command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + cmdArgs := append([]string{ + "-federation", + "-timeout=1m", + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-acl-replication-token-file", tokenFile, + "-auth-method-host=" + k8sMockServer.URL, + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], + }, c.TokenFlags...) + cmd.init() + responseCode := cmd.Run(cmdArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + datacenter := "" + if c.GlobalAuthMethod { + datacenter = "dc1" + } + client, err := api.NewClient(&api.Config{ + Address: consulHTTPAddr, + Datacenter: datacenter, + }) + require.NoError(t, err) + + retry.Run(t, func(r *retry.R) { + tok, _, err := client.ACL().Login(&api.ACLLoginParams{ + AuthMethod: authMethodName, + BearerToken: jwtToken, + Meta: map[string]string{}, + }, &api.WriteOptions{}) + require.NoError(r, err) + + require.Equal(r, len(tok.Roles), len(c.Roles)) + for _, role := range tok.Roles { + require.Contains(r, c.Roles, role.Name) + } + require.Equal(r, !c.GlobalToken, tok.Local) + }) + }) + } +} + +// Test that the component auth method gets created. +func TestRun_PrimaryDatacenter_ComponentAuthMethod(t *testing.T) { + t.Parallel() + + k8s, testSvr := completeSetup(t) + setUpK8sServiceAccount(t, k8s, ns) + defer testSvr.Stop() + + // Run the command. 
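[Editor's note: illustrative sketch, not part of this patch.] What the command sets up is ultimately exercised by a plain consul login: the component presents its Kubernetes service-account JWT to the auth method and receives an ACL token whose roles and locality depend on the component. For components with a global auth method the login is directed at the primary datacenter and yields a non-local token. A minimal sketch with placeholder names, assuming the JWT has already been read (for example from the pod's projected service-account token file) and that the primary datacenter is dc1 as in these tests:

func loginExample(consulAddr, authMethodName, jwt string, global bool) (*api.ACLToken, error) {
	cfg := &api.Config{Address: consulAddr}
	if global {
		// Global tokens are minted against the auth method registered in the primary DC.
		cfg.Datacenter = "dc1"
	}
	client, err := api.NewClient(cfg)
	if err != nil {
		return nil, err
	}
	tok, _, err := client.ACL().Login(&api.ACLLoginParams{
		AuthMethod:  authMethodName,
		BearerToken: jwt,
	}, nil)
	return tok, err
}

The test below runs the command, performs this login against the mock token-review endpoint, and asserts on the returned token's roles and Local flag.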
+ ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + cmd.init() + cmdArgs := []string{ + "-timeout=1m", + "-k8s-namespace=" + ns, + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-resource-prefix=" + resourcePrefix} + + responseCode := cmd.Run(cmdArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + // Check that the expected policy was created. + bootToken := getBootToken(t, k8s, resourcePrefix, ns) + consulClient, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + Token: bootToken, + }) + require.NoError(t, err) + authMethod, _, err := consulClient.ACL().AuthMethodRead(resourcePrefix+"-k8s-component-auth-method", &api.QueryOptions{}) + require.NoError(t, err) + require.NotNil(t, authMethod) +} + +// Test that the local and global component auth methods gets created when run in the +// secondary datacenter. +func TestRun_SecondaryDatacenter_ComponentAuthMethod(t *testing.T) { + t.Parallel() + + bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + tokenFile := common.WriteTempFile(t, bootToken) + k8s, consul, consulHTTPAddr, cleanup := mockReplicatedSetup(t, bootToken) + setUpK8sServiceAccount(t, k8s, ns) + defer cleanup() + + // Run the command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + cmd.init() + cmdArgs := []string{ + "-federation", + "-timeout=1m", + "-k8s-namespace=" + ns, + "-auth-method-host=" + "https://my-kube.com", + "-acl-replication-token-file", tokenFile, + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], + "-resource-prefix=" + resourcePrefix} + + responseCode := cmd.Run(cmdArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + // Check that the expected local auth-method was created. + localAuthMethod, _, err := consul.ACL().AuthMethodRead(resourcePrefix+"-k8s-component-auth-method", &api.QueryOptions{}) + require.NoError(t, err) + require.NotNil(t, localAuthMethod) + // Check that the expected global auth-method was created. + globalAuthMethod, _, err := consul.ACL().AuthMethodRead(resourcePrefix+"-k8s-component-auth-method-dc2", &api.QueryOptions{Datacenter: "dc1"}) + require.NoError(t, err) + require.NotNil(t, globalAuthMethod) +} + // Set up test consul agent and kubernetes cluster. func completeSetup(t *testing.T) (*fake.Clientset, *testutil.TestServer) { k8s := fake.NewSimpleClientset() @@ -2252,6 +2835,7 @@ func replicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Clie if bootToken == "" { primaryK8s := fake.NewSimpleClientset() require.NoError(t, err) + setUpK8sServiceAccount(t, primaryK8s, ns) // Run the command to bootstrap ACLs primaryUI := cli.NewMockUi() @@ -2354,7 +2938,7 @@ func getBootToken(t *testing.T, k8s *fake.Clientset, prefix string, k8sNamespace func setUpK8sServiceAccount(t *testing.T, k8s *fake.Clientset, namespace string) (string, string) { // Create ServiceAccount for the kubernetes auth method if it doesn't exist, // otherwise, do nothing. - serviceAccountName := resourcePrefix + "-connect-injector" + serviceAccountName := resourcePrefix + "-auth-method" sa, _ := k8s.CoreV1().ServiceAccounts(namespace).Get(context.Background(), serviceAccountName, metav1.GetOptions{}) if sa == nil { // Create a service account that references two secrets. 
@@ -2371,7 +2955,7 @@ func setUpK8sServiceAccount(t *testing.T, k8s *fake.Clientset, namespace string) Name: resourcePrefix + "-some-other-secret", }, { - Name: resourcePrefix + "-connect-injector", + Name: resourcePrefix + "-auth-method", }, }, }, @@ -2386,7 +2970,7 @@ func setUpK8sServiceAccount(t *testing.T, k8s *fake.Clientset, namespace string) require.NoError(t, err) // Create a Kubernetes secret if it doesn't exist, otherwise update it - secretName := resourcePrefix + "-connect-injector" + secretName := resourcePrefix + "-auth-method" secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, diff --git a/control-plane/subcommand/server-acl-init/connect_inject.go b/control-plane/subcommand/server-acl-init/connect_inject.go index abd10f9f7f..5112eee906 100644 --- a/control-plane/subcommand/server-acl-init/connect_inject.go +++ b/control-plane/subcommand/server-acl-init/connect_inject.go @@ -1,7 +1,6 @@ package serveraclinit import ( - "errors" "fmt" "github.com/hashicorp/consul-k8s/control-plane/namespaces" @@ -18,13 +17,11 @@ const defaultKubernetesHost = "https://kubernetes.default.svc" // configureConnectInject sets up auth methods so that connect injection will // work. -func (c *Command) configureConnectInjectAuthMethod(consulClient *api.Client) error { - - authMethodName := c.withPrefix("k8s-auth-method") +func (c *Command) configureConnectInjectAuthMethod(consulClient *api.Client, authMethodName string) error { // Create the auth method template. This requires calls to the // kubernetes environment. - authMethodTmpl, err := c.createAuthMethodTmpl(authMethodName) + authMethodTmpl, err := c.createAuthMethodTmpl(authMethodName, true) if err != nil { return err } @@ -68,6 +65,7 @@ func (c *Command) configureConnectInjectAuthMethod(consulClient *api.Client) err return err } + c.log.Info("creating inject binding rule") // Create the binding rule. abr := api.ACLBindingRule{ Description: "Kubernetes binding rule", @@ -76,74 +74,20 @@ func (c *Command) configureConnectInjectAuthMethod(consulClient *api.Client) err BindName: "${serviceaccount.name}", Selector: c.flagBindingRuleSelector, } - - // Binding rule list api call query options - queryOptions := api.QueryOptions{} - - // Add a namespace if appropriate - // If namespaces and mirroring are enabled, this is not necessary because - // the binding rule will fall back to being created in the Consul `default` - // namespace automatically, as is necessary for mirroring. - if c.flagEnableNamespaces && !c.flagEnableInjectK8SNSMirroring { - abr.Namespace = c.flagConsulInjectDestinationNamespace - queryOptions.Namespace = c.flagConsulInjectDestinationNamespace - } - - var existingRules []*api.ACLBindingRule - err = c.untilSucceeds(fmt.Sprintf("listing binding rules for auth method %s", authMethodName), - func() error { - var err error - existingRules, _, err = consulClient.ACL().BindingRuleList(authMethodName, &queryOptions) - return err - }) - if err != nil { - return err - } - - // If the binding rule already exists, update it - // This updates the binding rule any time the acl bootstrapping - // command is rerun, which is a bit of extra overhead, but is - // necessary to pick up any potential config changes. 
- if len(existingRules) > 0 { - // Find the policy that matches our name and description - // and that's the ID we need - for _, existingRule := range existingRules { - if existingRule.BindName == abr.BindName && existingRule.Description == abr.Description { - abr.ID = existingRule.ID - } - } - - // This will only happen if there are existing policies - // for this auth method, but none that match the binding - // rule set up here in the bootstrap method. - if abr.ID == "" { - return errors.New("unable to find a matching ACL binding rule to update") - } - - err = c.untilSucceeds(fmt.Sprintf("updating acl binding rule for %s", authMethodName), - func() error { - _, _, err := consulClient.ACL().BindingRuleUpdate(&abr, nil) - return err - }) - } else { - // Otherwise create the binding rule - err = c.untilSucceeds(fmt.Sprintf("creating acl binding rule for %s", authMethodName), - func() error { - _, _, err := consulClient.ACL().BindingRuleCreate(&abr, nil) - return err - }) - } - return err + return c.createConnectBindingRule(consulClient, authMethodName, &abr) } -func (c *Command) createAuthMethodTmpl(authMethodName string) (api.ACLAuthMethod, error) { +// createAuthMethodTmpl sets up the auth method template based on the connect-injector's service account +// jwt token. It is common for both the connect inject auth method and the component auth method +// with the option to add namespace specific configuration to the auth method template via `useNS`. +func (c *Command) createAuthMethodTmpl(authMethodName string, useNS bool) (api.ACLAuthMethod, error) { // Get the Secret name for the auth method ServiceAccount. var authMethodServiceAccount *apiv1.ServiceAccount - saName := c.withPrefix("connect-injector") - err := c.untilSucceeds(fmt.Sprintf("getting %s ServiceAccount", saName), + serviceAccountName := c.withPrefix("auth-method") + err := c.untilSucceeds(fmt.Sprintf("getting %s ServiceAccount", serviceAccountName), func() error { var err error - authMethodServiceAccount, err = c.clientset.CoreV1().ServiceAccounts(c.flagK8sNamespace).Get(c.ctx, saName, metav1.GetOptions{}) + authMethodServiceAccount, err = c.clientset.CoreV1().ServiceAccounts(c.flagK8sNamespace).Get(c.ctx, serviceAccountName, metav1.GetOptions{}) return err }) if err != nil { @@ -175,14 +119,14 @@ func (c *Command) createAuthMethodTmpl(authMethodName string) (api.ACLAuthMethod // a secret of type ServiceAccountToken. if saSecret == nil { return api.ACLAuthMethod{}, - fmt.Errorf("found no secret of type 'kubernetes.io/service-account-token' associated with the %s service account", saName) + fmt.Errorf("found no secret of type 'kubernetes.io/service-account-token' associated with the %s service account", serviceAccountName) } kubernetesHost := defaultKubernetesHost // Check if custom auth method Host and CACert are provided - if c.flagInjectAuthMethodHost != "" { - kubernetesHost = c.flagInjectAuthMethodHost + if c.flagAuthMethodHost != "" { + kubernetesHost = c.flagAuthMethodHost } // Now we're ready to set up Consul's auth method. @@ -197,8 +141,9 @@ func (c *Command) createAuthMethodTmpl(authMethodName string) (api.ACLAuthMethod }, } - // Add options for mirroring namespaces - if c.flagEnableNamespaces && c.flagEnableInjectK8SNSMirroring { + // Add options for mirroring namespaces, this is only used by the connect inject auth method + // and so can be disabled for the component auth method. 
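[Editor's note: illustrative sketch, not part of this patch.] Both the connect-inject and the component auth methods are built from the same template: a kubernetes-type auth method pointed at the cluster's API server and carrying the auth-method service account's JWT so Consul can call TokenReview. The Config keys below are reconstructed from Consul's kubernetes auth method rather than copied from this hunk, and saSecret, kubernetesHost, and authMethodName refer to the surrounding function's variables:

authMethodTmpl := api.ACLAuthMethod{
	Name:        authMethodName,
	Type:        "kubernetes",
	Description: "Kubernetes Auth Method",
	Config: map[string]interface{}{
		"Host":              kubernetesHost,                   // defaults to https://kubernetes.default.svc
		"CACert":            string(saSecret.Data["ca.crt"]),  // API server CA from the SA token secret
		"ServiceAccountJWT": string(saSecret.Data["token"]),   // JWT Consul uses for TokenReview calls
	},
}

The conditional below then layers the namespace-mirroring options on top only when useNS is set, i.e. for the connect-inject auth method.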
+ if useNS && c.flagEnableNamespaces && c.flagEnableInjectK8SNSMirroring { authMethodTmpl.Config["MapNamespaces"] = true authMethodTmpl.Config["ConsulNamespacePrefix"] = c.flagInjectK8SNSMirroringPrefix } diff --git a/control-plane/subcommand/server-acl-init/connect_inject_test.go b/control-plane/subcommand/server-acl-init/connect_inject_test.go index a17d635bc1..959f02e178 100644 --- a/control-plane/subcommand/server-acl-init/connect_inject_test.go +++ b/control-plane/subcommand/server-acl-init/connect_inject_test.go @@ -30,7 +30,7 @@ func TestCommand_createAuthMethodTmpl_SecretNotFound(t *testing.T) { ctx: ctx, } - serviceAccountName := resourcePrefix + "-connect-injector" + serviceAccountName := resourcePrefix + "-auth-method" secretName := resourcePrefix + "-connect-injector" // Create a service account referencing secretName @@ -64,6 +64,6 @@ func TestCommand_createAuthMethodTmpl_SecretNotFound(t *testing.T) { _, err := k8s.CoreV1().Secrets(ns).Create(ctx, secret, metav1.CreateOptions{}) require.NoError(t, err) - _, err = cmd.createAuthMethodTmpl("test") - require.EqualError(t, err, "found no secret of type 'kubernetes.io/service-account-token' associated with the release-name-consul-connect-injector service account") + _, err = cmd.createAuthMethodTmpl("test", true) + require.EqualError(t, err, "found no secret of type 'kubernetes.io/service-account-token' associated with the release-name-consul-auth-method service account") } diff --git a/control-plane/subcommand/server-acl-init/create_or_update.go b/control-plane/subcommand/server-acl-init/create_or_update.go index 80dca054bf..085372827b 100644 --- a/control-plane/subcommand/server-acl-init/create_or_update.go +++ b/control-plane/subcommand/server-acl-init/create_or_update.go @@ -10,6 +10,183 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// createACLPolicyRoleAndBindingRule will create the ACL Policy for the component +// then create a set of ACLRole and ACLBindingRule which tie the component's serviceaccount +// to the authMethod, allowing the serviceaccount to later be allowed to issue a Consul Login. +func (c *Command) createACLPolicyRoleAndBindingRule(componentName, rules, dc, primaryDC string, global, primary bool, authMethodName, serviceAccountName string, client *api.Client) error { + // Create policy with the given rules. + policyName := fmt.Sprintf("%s-policy", componentName) + if c.flagFederation && !primary { + // If performing ACL replication, we must ensure policy names are + // globally unique so we append the datacenter name but only in secondary datacenters.. + policyName += fmt.Sprintf("-%s", dc) + } + var datacenters []string + if !global && dc != "" { + datacenters = append(datacenters, dc) + } + policyTmpl := api.ACLPolicy{ + Name: policyName, + Description: fmt.Sprintf("%s Token Policy", policyName), + Rules: rules, + Datacenters: datacenters, + } + err := c.untilSucceeds(fmt.Sprintf("creating %s policy", policyTmpl.Name), + func() error { + return c.createOrUpdateACLPolicy(policyTmpl, client) + }) + if err != nil { + return err + } + + // Create an ACLRolePolicyLink list to attach to the ACLRole. + ap := &api.ACLRolePolicyLink{ + Name: policyName, + } + apl := []*api.ACLRolePolicyLink{} + apl = append(apl, ap) + + // Add the ACLRole and ACLBindingRule. + return c.addRoleAndBindingRule(client, serviceAccountName, authMethodName, apl, global, primary, primaryDC, dc) +} + +// addRoleAndBindingRule adds an ACLRole and ACLBindingRule which reference the authMethod. 
+func (c *Command) addRoleAndBindingRule(client *api.Client, serviceAccountName string, authMethodName string, policies []*api.ACLRolePolicyLink, global, primary bool, primaryDC, dc string) error { + // This is the ACLRole which will allow the component which uses the serviceaccount + // to be able to do a consul login. + aclRoleName := fmt.Sprintf("%s-acl-role", serviceAccountName) + if c.flagFederation && !primary { + // If performing ACL replication, we must ensure policy names are + // globally unique so we append the datacenter name but only in secondary datacenters. + aclRoleName += fmt.Sprintf("-%s", dc) + } + role := &api.ACLRole{ + Name: aclRoleName, + Description: fmt.Sprintf("ACL Role for %s", serviceAccountName), + Policies: policies, + } + err := c.updateOrCreateACLRole(client, role) + if err != nil { + c.log.Error("unable to update or create ACL Role", err) + return err + } + + // Create the ACLBindingRule, this ties the Policies defined in the Role to the authMethod via serviceaccount. + abr := &api.ACLBindingRule{ + Description: fmt.Sprintf("Binding Rule for %s", serviceAccountName), + AuthMethod: authMethodName, + Selector: fmt.Sprintf("serviceaccount.name==%q", serviceAccountName), + BindType: api.BindingRuleBindTypeRole, + BindName: aclRoleName, + } + writeOptions := &api.WriteOptions{} + if global && dc != primaryDC { + writeOptions.Datacenter = primaryDC + } + return c.createOrUpdateBindingRule(client, authMethodName, abr, &api.QueryOptions{}, writeOptions) +} + +// updateOrCreateACLRole will query to see if existing role is in place and update them +// or create them if they do not yet exist. +func (c *Command) updateOrCreateACLRole(client *api.Client, role *api.ACLRole) error { + err := c.untilSucceeds(fmt.Sprintf("update or create acl role for %s", role.Name), + func() error { + var err error + aclRole, _, err := client.ACL().RoleReadByName(role.Name, &api.QueryOptions{}) + if err != nil { + c.log.Error("unable to read ACL Roles", err) + return err + } + if aclRole != nil { + _, _, err := client.ACL().RoleUpdate(aclRole, &api.WriteOptions{}) + if err != nil { + c.log.Error("unable to update role", err) + return err + } + return nil + } + _, _, err = client.ACL().RoleCreate(role, &api.WriteOptions{}) + if err != nil { + c.log.Error("unable to create role", err) + return err + } + return err + }) + return err +} + +// createConnectBindingRule will query to see if existing binding rules are in place and update them +// or create them if they do not yet exist. +func (c *Command) createConnectBindingRule(client *api.Client, authMethodName string, abr *api.ACLBindingRule) error { + // Binding rule list api call query options. + queryOptions := api.QueryOptions{} + + // If namespaces and mirroring are enabled, this is not necessary because + // the binding rule will fall back to being created in the Consul `default` + // namespace automatically, as is necessary for mirroring. 
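[Editor's note: illustrative sketch, not part of this patch.] The chain built above for each component is: a policy holding the rules, a role linking to that policy, and a binding rule that maps logins from the component's service account (via the auth method) onto that role. Shown here with hypothetical values for the sync-catalog component, following the naming used in the tests earlier in this diff; the rules string is a placeholder:

func exampleComponentACLChain() (api.ACLPolicy, api.ACLRole, api.ACLBindingRule) {
	policy := api.ACLPolicy{
		Name:  "sync-catalog-policy",
		Rules: `operator = "read"`, // placeholder rules for the component
	}
	role := api.ACLRole{
		Name:     "release-name-consul-sync-catalog-acl-role",
		Policies: []*api.ACLRolePolicyLink{{Name: policy.Name}},
	}
	rule := api.ACLBindingRule{
		AuthMethod: "release-name-consul-k8s-component-auth-method",
		Selector:   `serviceaccount.name=="release-name-consul-sync-catalog"`,
		BindType:   api.BindingRuleBindTypeRole,
		BindName:   role.Name,
	}
	return policy, role, rule
}

Returning to createConnectBindingRule: the conditional below applies the namespace handling described above to the connect-inject binding rule only.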
+ if c.flagEnableNamespaces && !c.flagEnableInjectK8SNSMirroring { + abr.Namespace = c.flagConsulInjectDestinationNamespace + queryOptions.Namespace = c.flagConsulInjectDestinationNamespace + } + + return c.createOrUpdateBindingRule(client, authMethodName, abr, &queryOptions, nil) +} + +func (c *Command) createOrUpdateBindingRule(client *api.Client, authMethodName string, abr *api.ACLBindingRule, queryOptions *api.QueryOptions, writeOptions *api.WriteOptions) error { + var existingRules []*api.ACLBindingRule + err := c.untilSucceeds(fmt.Sprintf("listing binding rules for auth method %s", authMethodName), + func() error { + var err error + existingRules, _, err = client.ACL().BindingRuleList(authMethodName, queryOptions) + return err + }) + if err != nil { + return err + } + + // If the binding rule already exists, update it + // This updates the binding rule any time the acl bootstrapping + // command is rerun, which is a bit of extra overhead, but is + // necessary to pick up any potential config changes. + if len(existingRules) > 0 { + // Find the policy that matches our name and description + // and that's the ID we need + for _, existingRule := range existingRules { + if existingRule.BindName == abr.BindName && existingRule.Description == abr.Description { + abr.ID = existingRule.ID + } + } + + // This will only happen if there are existing policies + // for this auth method, but none that match the binding + // rule set up here in the bootstrap method. Hence the + // new binding rule must be created as it belongs to the + // same auth method. + if abr.ID == "" { + c.log.Info("unable to find a matching ACL binding rule to update. creating ACL binding rule.") + err = c.untilSucceeds(fmt.Sprintf("creating acl binding rule for %s", authMethodName), + func() error { + _, _, err := client.ACL().BindingRuleCreate(abr, writeOptions) + return err + }) + } else { + err = c.untilSucceeds(fmt.Sprintf("updating acl binding rule for %s", authMethodName), + func() error { + _, _, err := client.ACL().BindingRuleUpdate(abr, writeOptions) + return err + }) + } + } else { + // Otherwise create the binding rule + err = c.untilSucceeds(fmt.Sprintf("creating acl binding rule for %s", authMethodName), + func() error { + _, _, err := client.ACL().BindingRuleCreate(abr, writeOptions) + return err + }) + } + return err +} + // createLocalACL creates a policy and acl token for this dc (datacenter), i.e. // the policy is only valid for this datacenter and the token is a local token. func (c *Command) createLocalACL(name, rules, dc string, isPrimary bool, consulClient *api.Client) error { @@ -23,15 +200,14 @@ func (c *Command) createGlobalACL(name, rules, dc string, isPrimary bool, consul return c.createACL(name, rules, false, dc, isPrimary, consulClient, "") } -// createGlobalACLWithSecretID creates a global policy and acl token with provided secret ID. -func (c *Command) createGlobalACLWithSecretID(name, rules, dc string, isPrimary bool, consulClient *api.Client, secretID string) error { - return c.createACL(name, rules, false, dc, isPrimary, consulClient, secretID) +// createACLWithSecretID creates a global policy and acl token with provided secret ID. +func (c *Command) createACLWithSecretID(name, rules, dc string, isPrimary bool, consulClient *api.Client, secretID string, local bool) error { + return c.createACL(name, rules, local, dc, isPrimary, consulClient, secretID) } // createACL creates a policy with rules and name. 
If localToken is true then // the token will be a local token and the policy will be scoped to only dc. // If localToken is false, the policy will be global. -// The token will be written to a Kubernetes secret. // When secretID is provided, we will use that value for the created token and // will skip writing it to a Kubernetes secret (because in this case we assume that // this value already exists in some secrets storage). @@ -69,9 +245,9 @@ func (c *Command) createACL(name, rules string, localToken bool, dc string, isPr } // Check if the replication token already exists in some form. - secretName := c.withPrefix(name + "-acl-token") // When secretID is not provided, we assume that replication token should exist // as a Kubernetes secret. + secretName := c.withPrefix(name + "-acl-token") if secretID == "" { // Check if the secret already exists, if so, we assume the ACL has already been // created and return. @@ -136,7 +312,7 @@ func (c *Command) createOrUpdateACLPolicy(policy api.ACLPolicy, consulClient *ap // Allowing the Consul node name to be configurable also requires any sync // policy to be updated in case the node name has changed. if isPolicyExistsErr(err, policy.Name) { - if c.flagEnableNamespaces || c.flagCreateSyncToken { + if c.flagEnableNamespaces || c.flagSyncCatalog { c.log.Info(fmt.Sprintf("Policy %q already exists, updating", policy.Name)) // The policy ID is required in any PolicyUpdate call, so first we need to diff --git a/control-plane/subcommand/server-acl-init/create_or_update_test.go b/control-plane/subcommand/server-acl-init/create_or_update_test.go index 57cdffa2a1..5cd01fac25 100644 --- a/control-plane/subcommand/server-acl-init/create_or_update_test.go +++ b/control-plane/subcommand/server-acl-init/create_or_update_test.go @@ -20,10 +20,10 @@ func TestCreateOrUpdateACLPolicy_ErrorsIfDescriptionDoesNotMatch(t *testing.T) { ui := cli.NewMockUi() k8s := fake.NewSimpleClientset() cmd := Command{ - UI: ui, - clientset: k8s, - log: hclog.NewNullLogger(), - flagCreateSyncToken: true, + UI: ui, + clientset: k8s, + log: hclog.NewNullLogger(), + flagSyncCatalog: true, } // Start Consul. diff --git a/control-plane/subcommand/server-acl-init/servers.go b/control-plane/subcommand/server-acl-init/servers.go index 0f0ab8a0d1..63f9552dab 100644 --- a/control-plane/subcommand/server-acl-init/servers.go +++ b/control-plane/subcommand/server-acl-init/servers.go @@ -20,31 +20,40 @@ func (c *Command) bootstrapServers(serverAddresses []string, bootstrapToken, boo firstServerAddr := fmt.Sprintf("%s:%d", serverAddresses[0], c.flagServerPort) if bootstrapToken == "" { + c.log.Info("No bootstrap token from previous installation found, continuing on to bootstrapping") + var err error bootstrapToken, err = c.bootstrapACLs(firstServerAddr, scheme, bootTokenSecretName) if err != nil { return "", err } + } else { + c.log.Info(fmt.Sprintf("ACLs already bootstrapped - retrieved bootstrap token from Secret %q", bootTokenSecretName)) } - // Override our original client with a new one that has the bootstrap token - // set. - consulClient, err := consul.NewClient(&api.Config{ - Address: firstServerAddr, - Scheme: scheme, - Token: bootstrapToken, - TLSConfig: api.TLSConfig{ - Address: c.flagConsulTLSServerName, - CAFile: c.flagConsulCACert, - }, - }) - if err != nil { - return "", fmt.Errorf("creating Consul client for address %s: %s", firstServerAddr, err) - } + // We should only create and set server tokens when servers are running within this cluster. 
+ if c.flagSetServerTokens { + c.log.Info("Setting Consul server tokens") - // Create new tokens for each server and apply them. - if err := c.setServerTokens(consulClient, serverAddresses, bootstrapToken, scheme); err != nil { - return "", err + // Override our original client with a new one that has the bootstrap token + // set. + consulClient, err := consul.NewClient(&api.Config{ + Address: firstServerAddr, + Scheme: scheme, + Token: bootstrapToken, + TLSConfig: api.TLSConfig{ + Address: c.flagConsulTLSServerName, + CAFile: c.flagConsulCACert, + }, + }) + if err != nil { + return "", fmt.Errorf("creating Consul client for address %s: %s", firstServerAddr, err) + } + + // Create new tokens for each server and apply them. + if err = c.setServerTokens(consulClient, serverAddresses, bootstrapToken, scheme); err != nil { + return "", err + } } return bootstrapToken, nil }
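[Editor's note: illustrative sketch, not part of this patch.] setServerTokens itself is not shown in this hunk; conceptually, once ACLs are bootstrapped each server agent is given its own token tied to the global agent policy created by server-acl-init. The sketch below assumes the api package's TokenCreate and UpdateAgentACLToken helpers and uses a client already authenticated with the bootstrap token; it is not the author's implementation:

func setAgentTokenExample(serverClient *api.Client) error {
	// Create a token tied to the agent policy created earlier by server-acl-init.
	tok, _, err := serverClient.ACL().TokenCreate(&api.ACLToken{
		Description: "Server agent token",
		Policies:    []*api.ACLTokenPolicyLink{{Name: "agent-token"}},
	}, nil)
	if err != nil {
		return err
	}
	// Tell this agent to use the new token for its own registration and internal RPCs.
	_, err = serverClient.Agent().UpdateAgentACLToken(tok.SecretID, nil)
	return err
}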