From 21ec8db89dcf0c27fe80ccf2b28703500e234768 Mon Sep 17 00:00:00 2001 From: Ryan C Koch Date: Tue, 14 Aug 2018 11:08:26 -0500 Subject: [PATCH] Initial release of open source module --- .gitignore | 40 +++ LICENSE | 1 + Makefile | 69 +++++ README.md | 195 +++++++++++++- auth.tf | 30 +++ dns.tf | 52 ++++ examples/deploy_service/README.md | 24 ++ examples/deploy_service/main.tf | 88 ++++++ examples/deploy_service/outputs.tf | 59 +++++ examples/deploy_service/variables.tf | 44 +++ examples/node_pool/README.md | 18 ++ examples/node_pool/main.tf | 98 +++++++ examples/node_pool/outputs.tf | 59 +++++ examples/node_pool/variables.tf | 44 +++ examples/simple/README.md | 18 ++ examples/simple/main.tf | 36 +++ examples/simple/outputs.tf | 59 +++++ examples/simple/variables.tf | 44 +++ examples/stub_domains/README.md | 23 ++ examples/stub_domains/main.tf | 48 ++++ examples/stub_domains/outputs.tf | 59 +++++ examples/stub_domains/variables.tf | 44 +++ main.tf | 148 +++++++++++ masq.tf | 42 +++ outputs.tf | 70 +++++ scripts/delete-default-resource.sh | 41 +++ scripts/kube_auth_wrapper.sh | 48 ++++ test/boilerplate/boilerplate.Dockerfile.txt | 13 + test/boilerplate/boilerplate.Makefile.txt | 13 + test/boilerplate/boilerplate.go.txt | 15 ++ test/boilerplate/boilerplate.py.txt | 13 + test/boilerplate/boilerplate.sh.txt | 13 + test/boilerplate/boilerplate.tf.txt | 15 ++ test/boilerplate/boilerplate.xml.txt | 15 ++ test/boilerplate/boilerplate.yaml.txt | 13 + test/integration/gcloud/.gitignore | 1 + test/integration/gcloud/integration.bats | 258 ++++++++++++++++++ test/integration/gcloud/run.sh | 225 ++++++++++++++++ test/integration/gcloud/sample.sh | 35 +++ test/make.sh | 86 ++++++ test/test_verify_boilerplate.py | 136 ++++++++++ test/verify_boilerplate.py | 279 ++++++++++++++++++++ variables.tf | 160 +++++++++++ 43 files changed, 2789 insertions(+), 2 deletions(-) create mode 100644 .gitignore create mode 100644 Makefile create mode 100644 auth.tf create mode 100644 dns.tf create mode 100644 examples/deploy_service/README.md create mode 100644 examples/deploy_service/main.tf create mode 100644 examples/deploy_service/outputs.tf create mode 100644 examples/deploy_service/variables.tf create mode 100644 examples/node_pool/README.md create mode 100644 examples/node_pool/main.tf create mode 100644 examples/node_pool/outputs.tf create mode 100644 examples/node_pool/variables.tf create mode 100644 examples/simple/README.md create mode 100644 examples/simple/main.tf create mode 100644 examples/simple/outputs.tf create mode 100644 examples/simple/variables.tf create mode 100644 examples/stub_domains/README.md create mode 100644 examples/stub_domains/main.tf create mode 100644 examples/stub_domains/outputs.tf create mode 100644 examples/stub_domains/variables.tf create mode 100644 main.tf create mode 100644 masq.tf create mode 100644 outputs.tf create mode 100755 scripts/delete-default-resource.sh create mode 100755 scripts/kube_auth_wrapper.sh create mode 100644 test/boilerplate/boilerplate.Dockerfile.txt create mode 100644 test/boilerplate/boilerplate.Makefile.txt create mode 100644 test/boilerplate/boilerplate.go.txt create mode 100644 test/boilerplate/boilerplate.py.txt create mode 100644 test/boilerplate/boilerplate.sh.txt create mode 100644 test/boilerplate/boilerplate.tf.txt create mode 100644 test/boilerplate/boilerplate.xml.txt create mode 100644 test/boilerplate/boilerplate.yaml.txt create mode 100644 test/integration/gcloud/.gitignore create mode 100755 test/integration/gcloud/integration.bats create 
mode 100755 test/integration/gcloud/run.sh create mode 100644 test/integration/gcloud/sample.sh create mode 100755 test/make.sh create mode 100755 test/test_verify_boilerplate.py create mode 100644 test/verify_boilerplate.py create mode 100644 variables.tf diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..fa9c7db119 --- /dev/null +++ b/.gitignore @@ -0,0 +1,40 @@ +# OSX leaves these everywhere on SMB shares +._* + +# OSX trash +.DS_Store + +# Python +*.pyc + +# Emacs save files +*~ +\#*\# +.\#* + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +### https://raw.github.com/github/gitignore/90f149de451a5433aebd94d02d11b0e28843a1af/Terraform.gitignore + +# Local .terraform directories +**/.terraform/* + +# .tfstate files +*.tfstate +*.tfstate.* + +# Crash log files +crash.log + +# Ignore any .tfvars files that are generated automatically for each Terraform run. Most +# .tfvars files are managed as part of configuration and so should be included in +# version control. +# +# example.tfvars + +test/integration/tmp diff --git a/LICENSE b/LICENSE index 261eeb9e9f..d645695673 100644 --- a/LICENSE +++ b/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..ffb7910645 --- /dev/null +++ b/Makefile @@ -0,0 +1,69 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Make will use bash instead of sh +SHELL := /usr/bin/env bash + +# All is the first target in the file so it will get picked up when you just run 'make' on its own +all: check_shell check_python check_golang check_terraform check_docker check_base_files test_check_headers check_headers check_trailing_whitespace + +# The .PHONY directive tells make that this isn't a real target and so +# the presence of a file named 'check_shell' won't cause this target to stop +# working +.PHONY: check_shell +check_shell: + @source test/make.sh && check_shell + +.PHONY: check_python +check_python: + @source test/make.sh && check_python + +.PHONY: check_golang +check_golang: + @source test/make.sh && golang + +.PHONY: check_terraform +check_terraform: + @source test/make.sh && check_terraform + +.PHONY: check_docker +check_docker: + @source test/make.sh && docker + +.PHONY: check_base_files +check_base_files: + @source test/make.sh && basefiles + +.PHONY: check_shebangs +check_shebangs: + @source test/make.sh && check_bash + +.PHONY: check_trailing_whitespace +check_trailing_whitespace: + @source test/make.sh && check_trailing_whitespace + +.PHONY: test_check_headers +test_check_headers: + @echo "Testing the validity of the header check" + @python test/test_verify_boilerplate.py + +.PHONY: check_headers +check_headers: + @echo "Checking file headers" + @python test/verify_boilerplate.py + +# Integration tests +.PHONY: test_integration +test_integration: + ./test/integration/gcloud/run.sh diff --git a/README.md b/README.md index c6e93b2aa1..1a2d9a5c19 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,193 @@ -# terraform-google-kubernetes-engine -A Cloud Foundation Toolkit Module: Opinionated Google Cloud Platform project creation and configuration with Shared VPC, IAM, APIs, etc. +# Terraform Kubernetes Engine Module + +This module handles opinionated Google Cloud Platform Kubernetes Engine cluster creation and configuration with Node Pools, IP MASQ, Network Policy, etc. 
+
+## Requirements
+### Google Cloud SDK
+- [gcloud](https://cloud.google.com/sdk/install)
+### Kubectl
+- [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x
+### Terraform plugins
+- [Terraform](https://www.terraform.io/downloads.html) 0.10.x
+- [terraform-provider-google](https://github.com/terraform-providers/terraform-provider-google) plugin v1.8.0
+
+### Configure a Service Account
+In order to execute this module you must have a Service Account with the following:
+
+#### Roles
+The service account must have the following roles:
+- roles/compute.viewer on the project
+- roles/container.clusterAdmin on the project
+
+### Enable APIs
+In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created:
+
+- Compute Engine API - compute.googleapis.com
+- Kubernetes Engine API - container.googleapis.com
+
+## Install
+
+### Terraform
+Be sure you have the correct Terraform version (0.10.x); you can download the binary here:
+- https://releases.hashicorp.com/terraform/
+
+## Usage
+There are multiple examples included in the [examples](./examples/) folder, but simple usage is as follows:
+
+```hcl
+module "gke" {
+  source                     = "github.com/terraform-google-modules/terraform-google-kubernetes-engine"
+  credentials_path           = "${local.credentials_file_path}"
+  project_id                 = "<PROJECT ID>"
+  cluster_name               = "gke-test-1"
+  region                     = "us-central1"
+  network                    = "vpc-01"
+  subnetwork                 = "us-central1-01"
+  ip_range_pods              = "us-central1-01-gke-01-pods"
+  ip_range_services          = "us-central1-01-gke-01-services"
+  node_service_account       = "project-service-account@<PROJECT ID>.iam.gserviceaccount.com"
+  http_load_balancing        = false
+  horizontal_pod_autoscaling = true
+  kubernetes_dashboard       = true
+  network_policy             = true
+
+  node_pools = [
+    {
+      name         = "default-node-pool"
+      machine_type = "n1-standard-2"
+      min_count    = 1
+      max_count    = 100
+      disk_size_gb = 100
+      disk_type    = "pd-standard"
+      image_type   = "COS"
+      auto_repair  = true
+      auto_upgrade = true
+    },
+  ]
+
+  node_pools_labels = {
+    all = {}
+
+    default-node-pool = {
+      default-node-pool = "true"
+    }
+  }
+
+  node_pools_taints = {
+    all = []
+
+    default-node-pool = [
+      {
+        key    = "default-node-pool"
+        value  = "true"
+        effect = "PREFER_NO_SCHEDULE"
+      },
+    ]
+  }
+
+  node_pools_tags = {
+    all = []
+
+    default-node-pool = [
+      "default-node-pool",
+    ]
+  }
+}
+```
+
+Then perform the following commands on the root folder:
+
+- `terraform init` to get the plugins
+- `terraform plan` to see the infrastructure plan
+- `terraform apply` to apply the infrastructure build
+- `terraform destroy` to destroy the built infrastructure
+
+#### Variables
+Please refer to the /variables.tf file for the required and optional variables.
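+
+As an illustrative sketch (not one of the shipped examples), a minimal invocation only needs to set the required variables; every other input falls back to the defaults declared in /variables.tf. All resource names below are placeholders for your own infrastructure:
+
+```hcl
+module "gke" {
+  source            = "github.com/terraform-google-modules/terraform-google-kubernetes-engine"
+  credentials_path  = "${local.credentials_file_path}"
+  project_id        = "<PROJECT ID>"
+  cluster_name      = "gke-minimal-1"
+  region            = "us-central1"
+  network           = "vpc-01"
+  subnetwork        = "us-central1-01"
+  ip_range_pods     = "us-central1-01-gke-01-pods"
+  ip_range_services = "us-central1-01-gke-01-services"
+}
+```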
+
+#### Outputs
+Please refer to the /outputs.tf file for the outputs that you can get with the `terraform output` command.
+
+## Infrastructure
+The resources/services/activations/deletions that this module will create/trigger are:
+- Create a GKE cluster with the provided addons
+- Create GKE Node Pool(s) with provided configuration and attach to cluster
+- Replace the default kube-dns configmap if `stub_domains` are provided
+- Activate network policy if `network_policy` is true
+- Add `ip-masq-agent` configmap with provided `ip_masq_non_masquerade_cidrs` if `network_policy` is true or `ip_masq_config_enabled` is true
+
+## File structure
+The project has the following folders and files:
+
+- /: root folder
+- /examples: examples for using this module
+- /scripts: Scripts for specific tasks on module (see Infrastructure section on this file)
+- /test: Folders with files for testing the module (see Testing section on this file)
+- /main.tf: main file for this module, contains all the resources to create
+- /variables.tf: all the variables for the module
+- /outputs.tf: the outputs of the module
+- /README.md: this file
+
+## Testing
+
+### Requirements
+- [bats](https://github.com/sstephenson/bats) 0.4.0
+- [jq](https://stedolan.github.io/jq/) 1.5
+
+### Integration test
+##### Terraform integration tests
+The integration tests for this module are built with bats and check the following:
+- Perform `terraform init` command
+- Perform `terraform get` command
+- Perform `terraform plan` command and check that it'll create *n* resources, modify 0 resources and delete 0 resources
+- Perform `terraform apply -auto-approve` command and check that it has created the *n* resources, modified 0 resources and deleted 0 resources
+- Perform `terraform plan` command and check that it'll create 0 resources, modify 1 resource and delete 0 resources
+- Perform `terraform apply -auto-approve` command and check that it has created 0 resources, modified 1 resource and deleted 0 resources
+- Perform `gcloud` commands and check the infrastructure is in the desired state
+- Perform `kubectl` commands and check the infrastructure is in the desired state
+- Perform `terraform destroy -force` command and check that it has destroyed the *n* resources
+
+You can use the following command to run the integration test in the root folder:
+
+  `test/integration/gcloud/run.sh`
+
+### Linting
+The makefile in this project will lint or sometimes just format any shell,
+Python, golang, Terraform, or Dockerfiles. The linters will only be run if
+the makefile finds files with the appropriate file extension.
+
+All of the linter checks are in the default make target, so you just have to
+run
+
+```
+make -s
+```
+
+The -s is for 'silent'. Successful output looks like this:
+
+```
+Running shellcheck
+Running flake8
+Running go fmt and go vet
+Running terraform validate
+Running hadolint on Dockerfiles
+Checking for required files
+Testing the validity of the header check
+..
+----------------------------------------------------------------------
+Ran 2 tests in 0.026s
+
+OK
+Checking file headers
+The following lines have trailing whitespace
+```
+
+The linters are as follows:
+* Shell - shellcheck. Can be found in homebrew
+* Python - flake8. Can be installed with 'pip install flake8'
+* Golang - gofmt. gofmt comes with the standard golang installation. golang
+is a compiled language so there is no standard linter.
+* Terraform - terraform has a built-in linter in the 'terraform validate'
+command.
+* Dockerfiles - hadolint. Can be found in homebrew
diff --git a/auth.tf b/auth.tf
new file mode 100644
index 0000000000..02e243eea2
--- /dev/null
+++ b/auth.tf
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/******************************************
+  Retrieve authentication token
+ *****************************************/
+data "google_client_config" "default" {}
+
+/******************************************
+  Configure provider
+ *****************************************/
+provider "kubernetes" {
+  load_config_file       = false
+  host                   = "https://${google_container_cluster.primary.endpoint}"
+  token                  = "${data.google_client_config.default.access_token}"
+  cluster_ca_certificate = "${base64decode(google_container_cluster.primary.master_auth.0.cluster_ca_certificate)}"
+}
diff --git a/dns.tf b/dns.tf
new file mode 100644
index 0000000000..c01b758536
--- /dev/null
+++ b/dns.tf
@@ -0,0 +1,52 @@
+/**
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/******************************************
+  Delete default kube-dns configmap
+ *****************************************/
+resource "null_resource" "delete_default_kube_dns_configmap" {
+  count = "${local.custom_kube_dns_config ? 1 : 0}"
+
+  provisioner "local-exec" {
+    command = "${path.module}/scripts/kube_auth_wrapper.sh ${var.project_id} ${var.credentials_path} ${var.region} ${var.cluster_name} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns"
+  }
+
+  depends_on = ["google_container_cluster.primary", "google_container_node_pool.pools"]
+}
+
+/******************************************
+  Create kube-dns configmap
+ *****************************************/
+resource "kubernetes_config_map" "kube-dns" {
+  count = "${local.custom_kube_dns_config ? 1 : 0}"

+  metadata {
+    name      = "kube-dns"
+    namespace = "kube-system"
+
+    labels {
+      maintained_by = "terraform"
+    }
+  }
+
+  data {
+    stubDomains = <&2 echo "3 arguments expected. Exiting."
+ exit 1 +fi + +RESOURCE_NAMESPACE=$1 +RESOURCE_TYPE=$2 +RESOURCE_NAME=$3 + +RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exit 1) + +# Delete requested resource +if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then + echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" + else + echo "Deleting default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" + kubectl -n "${RESOURCE_NAMESPACE}" delete "${RESOURCE_TYPE}" "${RESOURCE_NAME}" + fi +else + echo "No default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" +fi diff --git a/scripts/kube_auth_wrapper.sh b/scripts/kube_auth_wrapper.sh new file mode 100755 index 0000000000..a5ba4410dd --- /dev/null +++ b/scripts/kube_auth_wrapper.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e + +if [ "$#" -lt 4 ]; then + >&2 echo "Not all expected arguments set." + exit 1 +fi + +PROJECT_ID=$1 +CREDENTIALS=$2 +LOCATION=$3 +CLUSTER_NAME=$4 + +shift 4 + +export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=$CREDENTIALS + +RANDOM_ID="${RANDOM}_${RANDOM}" +export TMPDIR="/tmp/kube_auth_wrapper_${CLUSTER_NAME}_${RANDOM_ID}" + +function cleanup { + rm -rf "${TMPDIR}" + echo "CLEANUP" +} +trap cleanup EXIT + +mkdir "${TMPDIR}" + +export KUBECONFIG="${TMPDIR}/config" + +gcloud --project="${PROJECT_ID}" container clusters --zone="${LOCATION}" get-credentials "${CLUSTER_NAME}" + +"$@" diff --git a/test/boilerplate/boilerplate.Dockerfile.txt b/test/boilerplate/boilerplate.Dockerfile.txt new file mode 100644 index 0000000000..b0c7da3d77 --- /dev/null +++ b/test/boilerplate/boilerplate.Dockerfile.txt @@ -0,0 +1,13 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/test/boilerplate/boilerplate.Makefile.txt b/test/boilerplate/boilerplate.Makefile.txt new file mode 100644 index 0000000000..b0c7da3d77 --- /dev/null +++ b/test/boilerplate/boilerplate.Makefile.txt @@ -0,0 +1,13 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/test/boilerplate/boilerplate.go.txt b/test/boilerplate/boilerplate.go.txt new file mode 100644 index 0000000000..557e16f064 --- /dev/null +++ b/test/boilerplate/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ diff --git a/test/boilerplate/boilerplate.py.txt b/test/boilerplate/boilerplate.py.txt new file mode 100644 index 0000000000..b0c7da3d77 --- /dev/null +++ b/test/boilerplate/boilerplate.py.txt @@ -0,0 +1,13 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/test/boilerplate/boilerplate.sh.txt b/test/boilerplate/boilerplate.sh.txt new file mode 100644 index 0000000000..2e94f3e551 --- /dev/null +++ b/test/boilerplate/boilerplate.sh.txt @@ -0,0 +1,13 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/test/boilerplate/boilerplate.tf.txt b/test/boilerplate/boilerplate.tf.txt new file mode 100644 index 0000000000..cfccff84ca --- /dev/null +++ b/test/boilerplate/boilerplate.tf.txt @@ -0,0 +1,15 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
diff --git a/test/boilerplate/boilerplate.xml.txt b/test/boilerplate/boilerplate.xml.txt
new file mode 100644
index 0000000000..3d98cdc6e5
--- /dev/null
+++ b/test/boilerplate/boilerplate.xml.txt
@@ -0,0 +1,15 @@
+<!--
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
diff --git a/test/boilerplate/boilerplate.yaml.txt b/test/boilerplate/boilerplate.yaml.txt
new file mode 100644
index 0000000000..b0c7da3d77
--- /dev/null
+++ b/test/boilerplate/boilerplate.yaml.txt
@@ -0,0 +1,13 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/test/integration/gcloud/.gitignore b/test/integration/gcloud/.gitignore
new file mode 100644
index 0000000000..cdd7c19c8d
--- /dev/null
+++ b/test/integration/gcloud/.gitignore
@@ -0,0 +1 @@
+config.sh
diff --git a/test/integration/gcloud/integration.bats b/test/integration/gcloud/integration.bats
new file mode 100755
index 0000000000..1fe3125b0d
--- /dev/null
+++ b/test/integration/gcloud/integration.bats
@@ -0,0 +1,258 @@
+#!/usr/bin/env bats
+
+# #################################### #
+#           Terraform tests            #
+# #################################### #
+
+@test "Ensure that Terraform configures the dirs and downloads the plugins" {
+
+  run terraform init
+  [ "$status" -eq 0 ]
+}
+
+@test "Ensure that Terraform updates the modules" {
+
+  run terraform get
+  [ "$status" -eq 0 ]
+}
+
+@test "Terraform plan, ensure connection and creation of resources" {
+
+  run terraform plan
+  [ "$status" -eq 0 ]
+  [[ "$output" =~ 5\ to\ add ]]
+  [[ "$output" =~ 0\ to\ change ]]
+  [[ "$output" =~ 0\ to\ destroy ]]
+}
+
+@test "Terraform apply" {
+
+  run terraform apply -auto-approve
+  [ "$status" -eq 0 ]
+  [[ "$output" =~ 5\ added ]]
+  [[ "$output" =~ 0\ changed ]]
+  [[ "$output" =~ 0\ destroyed ]]
+}
+
+@test "Terraform plan enable network policy" {
+
+  run terraform plan
+  [ "$status" -eq 0 ]
+  [[ "$output" =~ 0\ to\ add ]]
+  [[ "$output" =~ 1\ to\ change ]]
+  [[ "$output" =~ 0\ to\ destroy ]]
+}
+
+@test "Terraform apply enable network policy" {
+
+  run terraform apply -auto-approve
+  [ "$status" -eq 0 ]
+  [[ "$output" =~ 0\ added ]]
+  [[ "$output" =~ 1\ changed ]]
+  [[ "$output" =~ 0\ destroyed ]]
+}
+
+# #################################### #
+#             gcloud tests             #
+# #################################### #
+
+@test "Test the api is activated" {
+
+  run gcloud --project=${PROJECT_ID} services list
+  [ "$status" -eq 0 ]
+  [[ "$output" = *"container.googleapis.com"* ]]
+}
+
+@test "Test the cluster is in running status" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.status'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "RUNNING" ]]
+}
+
+@test "Test the cluster has the expected initial cluster version" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.initialClusterVersion'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "$KUBERNETES_VERSION" ]]
+}
+
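+# (Illustrative sketch, not part of the original suite: any other field of the
+# JSON `describe` output can be asserted with the same gcloud-plus-jq pattern,
+# for example:
+#
+#   run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.locations | length'"
+#   [ "$status" -eq 0 ]
+#
+# where '.locations | length' is an assumed field/expression used only for
+# illustration.)
+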
+@test "Test the cluster is in the expected region" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.zone'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "$REGION" ]]
+}
+
+@test "Test the cluster is on the expected network" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.network'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "$NETWORK" ]]
+}
+
+@test "Test the cluster is on the expected subnetwork" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.subnetwork'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "$SUBNETWORK" ]]
+}
+
+@test "Test the cluster has the expected secondary ip range for pods" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.ipAllocationPolicy.clusterSecondaryRangeName'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "$IP_RANGE_PODS" ]]
+}
+
+@test "Test the cluster has the expected secondary ip range for services" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.ipAllocationPolicy.servicesSecondaryRangeName'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "$IP_RANGE_SERVICES" ]]
+}
+
+@test "Test the cluster has the expected addon settings" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.addonsConfig'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "{\"horizontalPodAutoscaling\":{},\"httpLoadBalancing\":{\"disabled\":true},\"kubernetesDashboard\":{},\"networkPolicyConfig\":{}}" ]]
+}
+
+@test "Test default pool has no initial node count" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.nodePools[] | select(.name == \"default-pool\") | .initialNodeCount'"
+  [ "$status" -eq 1 ]
+  [[ "$output" = "null" ]]
+}
+
+@test "Test default pool does not have autoscaling enabled" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.nodePools[] | select(.name == \"default-pool\") | .autoscaling.enabled'"
+  [ "$status" -eq 1 ]
+  [[ "$output" = "null" ]]
+}
+
+@test "Test pool-01 is expected version" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.nodePools[] | select(.name == \"pool-01\") | .version'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "$KUBERNETES_VERSION" ]]
+}
+
+@test "Test pool-01 has auto scaling enabled" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.nodePools[] | select(.name == \"pool-01\") | .autoscaling.enabled'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "true" ]]
+}
+
+@test "Test pool-01 has expected min node count" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.nodePools[] | select(.name == \"pool-01\") | .autoscaling.minNodeCount'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "1" ]]
+}
+
+@test "Test pool-01 has expected max node count" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.nodePools[] | select(.name == \"pool-01\") | .autoscaling.maxNodeCount'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "2" ]]
+}
+
+@test "Test pool-01 is expected machine type" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.nodePools[] | select(.name == \"pool-01\") | .config.machineType'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "n1-standard-1" ]]
+}
+
+@test "Test pool-01 has expected disk size" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.nodePools[] | select(.name == \"pool-01\") | .config.diskSizeGb'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "30" ]]
+}
+
+@test "Test pool-01 has expected labels" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.nodePools[] | select(.name == \"pool-01\") | .config.labels'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "{\"all_pools_label\":\"something\",\"cluster_name\":\"$CLUSTER_NAME\",\"node_pool\":\"pool-01\",\"pool_01_another_label\":\"no\",\"pool_01_label\":\"yes\"}" ]]
+}
+
+@test "Test pool-01 has expected network tags" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.nodePools[] | select(.name == \"pool-01\") | .config.tags'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "[\"gke-$CLUSTER_NAME\",\"gke-$CLUSTER_NAME-pool-01\",\"all-node-network-tag\",\"pool-01-network-tag\"]" ]]
+}
+
+@test "Test pool-01 has auto repair enabled" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.nodePools[] | select(.name == \"pool-01\") | .management.autoRepair'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "true" ]]
+}
+
+@test "Test pool-01 has auto upgrade disabled" {
+
+  run bash -c "gcloud --project=${PROJECT_ID} container clusters --region=${REGION} describe ${CLUSTER_NAME} --format=json | jq -cre '.nodePools[] | select(.name == \"pool-01\") | .management.autoUpgrade'"
+  [ "$status" -eq 1 ]
+  [[ "$output" = "null" ]]
+}
+
+@test "Test getting kubectl credentials" {
+
+  run gcloud --project=${PROJECT_ID} container clusters --region=${REGION} get-credentials ${CLUSTER_NAME}
+  [ "$status" -eq 0 ]
+  [[ "$output" = *"kubeconfig entry generated for ${CLUSTER_NAME}."* ]]
+}
+
+# #################################### #
+#            kubectl tests             #
+# #################################### #
+
+@test "Test pool-01 has expected taints" {
+
+  run bash -c "kubectl get nodes -o json -l node_pool=pool-01 | jq -cre '.items[0].spec.taints'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "[{\"effect\":\"PreferNoSchedule\",\"key\":\"all_pools_taint\",\"value\":\"true\"},{\"effect\":\"PreferNoSchedule\",\"key\":\"pool_01_taint\",\"value\":\"true\"},{\"effect\":\"PreferNoSchedule\",\"key\":\"pool_01_another_taint\",\"value\":\"true\"}]" ]]
+}
+
+@test "Test kube dns configmap created" {
+
+  run bash -c "kubectl -n kube-system get configmap -o json kube-dns | jq -cre '.metadata.labels.maintained_by'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "terraform" ]]
+}
+
+@test "Test ip masq agent configmap created" {
+
+  run bash -c "kubectl -n kube-system get configmap -o json ip-masq-agent | jq -cre '.metadata.labels.maintained_by'"
+  [ "$status" -eq 0 ]
+  [[ "$output" = "terraform" ]]
+}
+
+# #################################### #
+#          Terraform plan test         #
+# #################################### #
+
+@test "Terraform plan, ensure no change" {
+
+  run terraform plan
+  [ "$status" -eq 0 ]
+  [[ "$output" = *"No changes. Infrastructure is up-to-date."* ]]
+}
+
+# #################################### #
+#        Terraform destroy test        #
+# #################################### #
+
+@test "Terraform destroy" {
+
+  run terraform destroy -force
+  [ "$status" -eq 0 ]
+  [[ "$output" =~ 5\ destroyed ]]
+}
diff --git a/test/integration/gcloud/run.sh b/test/integration/gcloud/run.sh
new file mode 100755
index 0000000000..a40ae9c0c9
--- /dev/null
+++ b/test/integration/gcloud/run.sh
@@ -0,0 +1,225 @@
+#!/bin/bash
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+TEMPDIR=$(pwd)/test/integration/tmp
+TESTDIR=${BASH_SOURCE%/*}
+export TEST_ID="modules_gke_integration_gcloud_${RANDOM}"
+export KUBECONFIG="${TEMPDIR}/${TEST_ID}.kubeconfig"
+
+# Activate test working directory
+function make_testdir() {
+  mkdir -p "$TEMPDIR"
+  cp -r "$TESTDIR"/* "$TEMPDIR"
+}
+
+# Activate test config
+function activate_config() {
+  # shellcheck disable=SC1091
+  source config.sh
+  echo "$PROJECT_NAME"
+}
+
+# Cleans the workdir
+function clean_workdir() {
+  #rm -rf "$TEMPDIR"
+
+  export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=""
+  unset CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE
+}
+
+# Creates the main.tf file for Terraform
+function create_main_tf_file() {
+  echo "Creating main.tf file"
+  touch main.tf
+  cat <<EOF > main.tf
+locals {
+  credentials_file_path = "$CREDENTIALS_PATH"
+}
+
+provider "google" {
+  credentials = "\${file(local.credentials_file_path)}"
+}
+
+module "gke" {
+  source = "../../../"
+  region = "$REGION"
+  kubernetes_version = "$KUBERNETES_VERSION"
+
+  credentials_path = "\${local.credentials_file_path}"
+
+  node_service_account = "$NODE_SERVICE_ACCOUNT"
+
+  cluster_name = "$CLUSTER_NAME"
+  cluster_description = "Test GKE cluster"
+
+  project_id = "$PROJECT_ID"
+  network = "$NETWORK"
+  subnetwork = "$SUBNETWORK"
+
+  network_policy = true
+
+  ip_range_pods = "$IP_RANGE_PODS"
+  ip_range_services = "$IP_RANGE_SERVICES"
+
+  stub_domains {
+    "example.com" = [
+      "10.254.154.11",
+      "10.254.154.12",
+    ]
+
+    "testola.com" = [
+      "10.254.154.11",
+      "10.254.154.12",
+    ]
+  }
+
+  ip_masq_non_masquerade_cidrs = [
+    "10.0.0.0/8",
+    "192.168.20.0/24",
+    "192.168.21.0/24",
+  ]
+
+  node_pools = [
+    {
+      name = "pool-01"
+      machine_type = "n1-standard-1"
+      image_type = "COS"
+      initial_node_count = 2
+      min_count = 1
+      max_count = 2
+      auto_upgrade = false
+      disk_size_gb = 30
+      disk_type = "pd-standard"
+    },
+  ]
+  node_pools_labels = {
+    all = {
+      all_pools_label = "something"
+    }
+
+    pool-01 = {
+      pool_01_label = "yes"
+      pool_01_another_label = "no"
+    }
+  }
+  node_pools_taints = {
+    all = [
+      {
+        key = "all_pools_taint"
+        value = "true"
+        effect = "PREFER_NO_SCHEDULE"
+      },
+    ]
+
+    pool-01 = [
+      {
+        key = "pool_01_taint"
+        value = "true"
+        effect = "PREFER_NO_SCHEDULE"
+      },
+      {
+        key = "pool_01_another_taint"
+        value = "true"
+        effect = "PREFER_NO_SCHEDULE"
+      },
+    ]
+  }
+  node_pools_tags = {
+    all = [
+      "all-node-network-tag",
+    ]
+
+    pool-01 = [
+      "pool-01-network-tag",
+    ]
+  }
+}
+EOF
+}
+
+# Creates the outputs.tf file
+function create_outputs_file() {
+  echo "Creating outputs.tf file"
+  touch outputs.tf
+  cat <<'EOF' > outputs.tf
+output "cluster_name_example" {
+  value = "${module.gke.cluster_name}"
+}
+
+output "region_example" {
+  value = "${module.gke.region}"
+}
+
+output "endpoint_example" {
+  value = "${module.gke.endpoint}"
+}
+
+output "ca_certificate_example" {
+  value = "${module.gke.ca_certificate}"
+}
+
+output "min_master_version_example" {
+  value = "${module.gke.min_master_version}"
+}
+
+output "master_version_example" {
+  value = "${module.gke.master_version}"
+}
+
+output "node_version_example" {
+  value = "${module.gke.node_version}"
+}
+
+output "http_load_balancing_example" {
+  value = "${module.gke.http_load_balancing_enabled}"
+}
+
+output "horizontal_pod_autoscaling_example" {
+  value = "${module.gke.horizontal_pod_autoscaling_enabled}"
+}
+
+output "kubernetes_dashboard_example" {
+  value = "${module.gke.kubernetes_dashboard_enabled}"
+}
+
+output "node_pools_names_example" {
+  value = "${module.gke.node_pools_names}"
+}
+
+EOF
+}
+
+# Execute bats tests
+function run_bats() {
+  # Call to bats
+  echo "Tests to execute: $(bats integration.bats -c)"
+  bats integration.bats
+}
+
+# Preparing environment
+make_testdir
+cd "$TEMPDIR" || exit
+activate_config
+create_main_tf_file
+create_outputs_file
+
+# Call to bats
+run_bats
+
+# Clean the environment
+cd - || exit
+clean_workdir
+echo "Integration test finished"
diff --git a/test/integration/gcloud/sample.sh b/test/integration/gcloud/sample.sh
new file mode 100644
index 0000000000..1615fd5cf8
--- /dev/null
+++ b/test/integration/gcloud/sample.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#################################################################
+# PLEASE FILL THE VARIABLES WITH VALID VALUES FOR TESTING       #
+# DO NOT REMOVE ANY OF THE VARIABLES                            #
+#################################################################
+
+## These values you *MUST* modify to match your environment
+export PROJECT_ID="gke-test-integration"
+export NETWORK="vpc-01"
+export SUBNETWORK="us-east4-01"
+export IP_RANGE_PODS="us-east4-01-gke-01-pod"
+export IP_RANGE_SERVICES="us-east4-01-gke-01-service"
+export CREDENTIALS_PATH="$HOME/sa-key.json"
+
+## These values you can potentially leave at the defaults
+export CLUSTER_NAME="int-test-cluster-01"
+export REGION="us-east4"
+export KUBERNETES_VERSION="1.10.5-gke.4"
+export NODE_SERVICE_ACCOUNT=""
+export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=$CREDENTIALS_PATH
diff --git a/test/make.sh b/test/make.sh
new file mode 100755
index 0000000000..a9107fe62c
--- /dev/null
+++ b/test/make.sh
@@ -0,0 +1,86 @@
+#!/usr/bin/env bash
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This function checks to make sure that every
+# shebang has a '-e' flag, which causes it
+# to exit on error
+function check_bash() {
+  find . -name "*.sh" -print0 | while IFS= read -r -d '' file; do
+    # Inspect the first line (the shebang) of each shell script
+    if ! head -n 1 "$file" | grep -q -- '-e'; then
+      echo "$file is missing shebang with -e"
+      exit 1
+    fi
+  done
+}
+
+# This function makes sure that the required files for
+# releasing to OSS are present
+function basefiles() {
+  echo "Checking for required files"
+  test -f LICENSE || echo "Missing LICENSE"
+  test -f README.md || echo "Missing README.md"
+}
+
+# This function runs the hadolint linter on
+# every file named 'Dockerfile'
+function docker() {
+  echo "Running hadolint on Dockerfiles"
+  find . -name "Dockerfile" -exec hadolint {} \;
+}
+
+# This function runs 'terraform validate' against all
+# files ending in '.tf'
+function check_terraform() {
+  echo "Running terraform validate"
+  #shellcheck disable=SC2156
+  find . -name "*.tf" -exec bash -c 'terraform validate --check-variables=false $(dirname "{}")' \;
+}
+
+# This function runs 'go fmt' and 'go vet' on every file
+# that ends in '.go'
+function golang() {
+  echo "Running go fmt and go vet"
+  find . -name "*.go" -exec go fmt {} \;
+  find . -name "*.go" -exec go vet {} \;
+}
+
+# This function runs the flake8 linter on every file
+# ending in '.py'
+function check_python() {
+  echo "Running flake8"
+  find . -name "*.py" -exec flake8 {} \;
+}
+
+# This function runs the shellcheck linter on every
+# file ending in '.sh'
+function check_shell() {
+  echo "Running shellcheck"
+  find . -name "*.sh" -exec shellcheck -x {} \;
+}
+
+# This function makes sure that there is no trailing whitespace
+# in any files in the project.
+# There are some exclusions
+function check_trailing_whitespace() {
+  echo "The following lines have trailing whitespace"
+  grep -r '[[:blank:]]$' --exclude-dir=".terraform" --exclude="*.png" --exclude="*.pyc" --exclude-dir=".git" .
+  rc=$?
+ if [ $rc = 0 ]; then + exit 1 + fi +} diff --git a/test/test_verify_boilerplate.py b/test/test_verify_boilerplate.py new file mode 100755 index 0000000000..22a3cca055 --- /dev/null +++ b/test/test_verify_boilerplate.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +''' A simple test for the verify_boilerplate python script. +This will create a set of test files, both valid and invalid, +and confirm that the has_valid_header call returns the correct +value. + +It also checks the number of files that are found by the +get_files call. +''' +from copy import deepcopy +from tempfile import mkdtemp +from shutil import rmtree +import unittest +from verify_boilerplate import has_valid_header, get_refs, get_regexs, \ + get_args, get_files + + +class AllTestCase(unittest.TestCase): + """ + All of the setup, teardown, and tests are contained in this + class. + """ + + def write_file(self, filename, content, expected): + """ + A utility method that creates test files, and adds them to + the cases that will be tested. + + Args: + filename: (string) the file name (path) to be created. + content: (list of strings) the contents of the file. + expected: (boolean) True if the header is expected to be valid, + false if not. + """ + + file = open(filename, 'w+') + for line in content: + file.write(line + "\n") + file.close() + self.cases[filename] = expected + + def create_test_files(self, tmp_path, extension, header): + """ + Creates 2 test files for .tf, .xml, .go, etc and one for + Dockerfile, and Makefile. + + The reason for the difference is that Makefile and Dockerfile + don't have an extension. These would be substantially more + difficult to create negative test cases, unless the files + were written, deleted, and re-written. + + Args: + tmp_path: (string) the path in which to create the files + extension: (string) the file extension + header: (list of strings) the header/boilerplate content + """ + + content = "\n...blah \ncould be code or could be garbage\n" + special_cases = ["Dockerfile", "Makefile"] + header_template = deepcopy(header) + valid_filename = tmp_path + extension + valid_content = header_template.append(content) + if extension not in special_cases: + # Invalid test cases for non-*file files (.tf|.py|.sh|.yaml|.xml..) + invalid_header = [] + for line in header_template: + if "2018" in line: + invalid_header.append(line.replace('2018', 'YEAR')) + else: + invalid_header.append(line) + invalid_header.append(content) + invalid_content = invalid_header + invalid_filename = tmp_path + "invalid." + extension + self.write_file(invalid_filename, invalid_content, False) + valid_filename = tmp_path + "testfile." + extension + + valid_content = header_template + self.write_file(valid_filename, valid_content, True) + + def setUp(self): + """ + Set initial counts and values, and initializes the setup of the + test files. 
+ """ + self.cases = {} + self.tmp_path = mkdtemp() + "/" + self.my_args = get_args() + self.my_refs = get_refs(self.my_args) + self.my_regex = get_regexs() + self.prexisting_file_count = len( + get_files(self.my_refs.keys(), self.my_args)) + for key in self.my_refs: + self.create_test_files(self.tmp_path, key, + self.my_refs.get(key)) + + def tearDown(self): + """ Delete the test directory. """ + rmtree(self.tmp_path) + + def test_files_headers(self): + """ + Confirms that the expected output of has_valid_header is correct. + """ + for case in self.cases: + if self.cases[case]: + self.assertTrue(has_valid_header(case, self.my_refs, + self.my_regex)) + else: + self.assertFalse(has_valid_header(case, self.my_refs, + self.my_regex)) + + def test_invalid_count(self): + """ + Test that the initial files found isn't zero, indicating + a problem with the code. + """ + self.assertFalse(self.prexisting_file_count == 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/verify_boilerplate.py b/test/verify_boilerplate.py new file mode 100644 index 0000000000..a632fdedcc --- /dev/null +++ b/test/verify_boilerplate.py @@ -0,0 +1,279 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Verifies that all source files contain the necessary copyright boilerplate +# snippet. +# This is based on existing work +# https://github.com/kubernetes/test-infra/blob/master/hack +# /verify_boilerplate.py +from __future__ import print_function +import argparse +import glob +import os +import re +import sys + + +def get_args(): + """Parses command line arguments. + + Configures and runs argparse.ArgumentParser to extract command line + arguments. + + Returns: + An argparse.Namespace containing the arguments parsed from the + command line + """ + parser = argparse.ArgumentParser() + parser.add_argument("filenames", + help="list of files to check, " + "all files if unspecified", + nargs='*') + rootdir = os.path.dirname(__file__) + "/../" + rootdir = os.path.abspath(rootdir) + parser.add_argument( + "--rootdir", + default=rootdir, + help="root directory to examine") + + default_boilerplate_dir = os.path.join(rootdir, "test/boilerplate") + parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir) + return parser.parse_args() + + +def get_refs(ARGS): + """Converts the directory of boilerplate files into a map keyed by file + extension. + + Reads each boilerplate file's contents into an array, then adds that array + to a map keyed by the file extension. + + Returns: + A map of boilerplate lines, keyed by file extension. For example, + boilerplate.py.txt would result in the k,v pair {".py": py_lines} where + py_lines is an array containing each line of the file. 
+ """ + refs = {} + + # Find and iterate over the absolute path for each boilerplate template + for path in glob.glob(os.path.join( + ARGS.boilerplate_dir, + "boilerplate.*.txt")): + extension = os.path.basename(path).split(".")[1] + ref_file = open(path, 'r') + ref = ref_file.read().splitlines() + ref_file.close() + refs[extension] = ref + return refs + + +# pylint: disable=too-many-locals +def has_valid_header(filename, refs, regexs): + """Test whether a file has the correct boilerplate header. + + Tests each file against the boilerplate stored in refs for that file type + (based on extension), or by the entire filename (eg Dockerfile, Makefile). + Some heuristics are applied to remove build tags and shebangs, but little + variance in header formatting is tolerated. + + Args: + filename: A string containing the name of the file to test + refs: A map of boilerplate headers, keyed by file extension + regexs: a map of compiled regex objects used in verifying boilerplate + + Returns: + True if the file has the correct boilerplate header, otherwise returns + False. + """ + try: + with open(filename, 'r') as fp: # pylint: disable=invalid-name + data = fp.read() + except IOError: + return False + basename = os.path.basename(filename) + extension = get_file_extension(filename) + if extension: + ref = refs[extension] + else: + ref = refs[basename] + # remove build tags from the top of Go files + if extension == "go": + con = regexs["go_build_constraints"] + (data, found) = con.subn("", data, 1) + # remove shebang + elif extension == "sh" or extension == "py": + she = regexs["shebang"] + (data, found) = she.subn("", data, 1) + data = data.splitlines() + # if our test file is smaller than the reference it surely fails! + if len(ref) > len(data): + return False + # trim our file to the same number of lines as the reference file + data = data[:len(ref)] + year = regexs["year"] + for datum in data: + if year.search(datum): + return False + + # if we don't match the reference at this point, fail + if ref != data: + return False + return True + + +def get_file_extension(filename): + """Extracts the extension part of a filename. + + Identifies the extension as everything after the last period in filename. + + Args: + filename: string containing the filename + + Returns: + A string containing the extension in lowercase + """ + return os.path.splitext(filename)[1].split(".")[-1].lower() + + +# These directories will be omitted from header checks +SKIPPED_DIRS = [ + 'Godeps', 'third_party', '_gopath', '_output', + '.git', 'vendor', '__init__.py', 'node_modules' +] + + +def normalize_files(files): + """Extracts the files that require boilerplate checking from the files + argument. + + A new list will be built. Each path from the original files argument will + be added unless it is within one of SKIPPED_DIRS. All relative paths will + be converted to absolute paths by prepending the root_dir path parsed from + the command line, or its default value. + + Args: + files: a list of file path strings + + Returns: + A modified copy of the files list where any any path in a skipped + directory is removed, and all paths have been made absolute. 
+    """
+    newfiles = []
+    for pathname in files:
+        if any(x in pathname for x in SKIPPED_DIRS):
+            continue
+        newfiles.append(pathname)
+    for idx, pathname in enumerate(newfiles):
+        if not os.path.isabs(pathname):
+            newfiles[idx] = os.path.join(ARGS.rootdir, pathname)
+    return newfiles
+
+
+def get_files(extensions, ARGS):
+    """Generates a list of paths whose boilerplate should be verified.
+
+    If a list of file names has been provided on the command line, it will be
+    treated as the initial set to search. Otherwise, all paths within rootdir
+    will be discovered and used as the initial set.
+
+    Once the initial set of files is identified, it is normalized via
+    normalize_files() and further stripped of any file name whose extension is
+    not in extensions.
+
+    Args:
+        extensions: a list of file extensions indicating which file types
+        should have their boilerplate verified
+
+    Returns:
+        A list of absolute file paths
+    """
+    files = []
+    if ARGS.filenames:
+        files = ARGS.filenames
+    else:
+        for root, dirs, walkfiles in os.walk(ARGS.rootdir):
+            # don't visit certain dirs. This is just a performance improvement
+            # as we would prune these later in normalize_files(). But doing it
+            # cuts down the amount of filesystem walking we do and cuts down
+            # the size of the file list
+            for dpath in SKIPPED_DIRS:
+                if dpath in dirs:
+                    dirs.remove(dpath)
+            for name in walkfiles:
+                pathname = os.path.join(root, name)
+                files.append(pathname)
+    files = normalize_files(files)
+    outfiles = []
+    for pathname in files:
+        basename = os.path.basename(pathname)
+        extension = get_file_extension(pathname)
+        if extension in extensions or basename in extensions:
+            outfiles.append(pathname)
+    return outfiles
+
+
+def get_regexs():
+    """Builds a map of regular expressions used in boilerplate validation.
+
+    There are two scenarios where these regexes are used. The first is in
+    validating the date referenced in the boilerplate, by ensuring it is an
+    acceptable year. The second is in identifying non-boilerplate elements,
+    like shebangs and compiler hints that should be ignored when validating
+    headers.
+
+    Returns:
+        A map of compiled regular expression objects, keyed by mnemonic.
+    """
+    regexs = {}
+    # Search for "YEAR" which exists in the boilerplate, but shouldn't in the
+    # real thing
+    regexs["year"] = re.compile('YEAR')
+    # dates can be 2014 through 2018, company holder names can be anything
+    regexs["date"] = re.compile('(2014|2015|2016|2017|2018)')
+    # strip // +build \n\n build constraints
+    regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n",
+                                                re.MULTILINE)
+    # strip #!.* from shell/python scripts
+    regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
+    return regexs
+
+
+def main(args):
+    """Identifies and verifies files that should have the desired boilerplate.
+
+    Retrieves the lists of files to be validated and tests each one in turn.
+    If all files contain correct boilerplate, this function terminates
+    normally. Otherwise it prints the name of each non-conforming file and
+    exits with a non-zero status code.
+    """
+    regexs = get_regexs()
+    refs = get_refs(args)
+    filenames = get_files(refs.keys(), args)
+    nonconforming_files = []
+    for filename in filenames:
+        if not has_valid_header(filename, refs, regexs):
+            nonconforming_files.append(filename)
+    if nonconforming_files:
+        print('%d files have incorrect boilerplate headers:' % len(
+            nonconforming_files))
+        for filename in sorted(nonconforming_files):
+            print(os.path.relpath(filename, args.rootdir))
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    ARGS = get_args()
+    main(ARGS)
diff --git a/variables.tf b/variables.tf
new file mode 100644
index 0000000000..647064e038
--- /dev/null
+++ b/variables.tf
@@ -0,0 +1,160 @@
+/**
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+variable "project_id" {
+  description = "The project ID to host the cluster in"
+}
+
+variable "region" {
+  description = "The region to host the cluster in"
+}
+
+variable "credentials_path" {
+  description = "Path to a Service Account credentials file with permissions documented in the readme"
+}
+
+variable "cluster_name" {
+  description = "The name of the cluster"
+}
+
+variable "cluster_description" {
+  description = "The description of the cluster"
+  default     = ""
+}
+
+variable "network" {
+  description = "The VPC network to host the cluster in"
+}
+
+variable "subnetwork" {
+  description = "The subnetwork to host the cluster in"
+}
+
+variable "kubernetes_version" {
+  description = "The Kubernetes version of the masters. If set to 'latest' it will pull the latest available version in the selected region."
+  default     = "1.10.5-gke.0"
+}
+
+variable "node_version" {
+  description = "The Kubernetes version of the node pools. Defaults to the kubernetes_version (master) variable. Must be set to the same version as the master at initial creation."
+  default     = ""
+}
+
+variable "horizontal_pod_autoscaling" {
+  description = "Enable horizontal pod autoscaling addon"
+  default     = true
+}
+
+variable "http_load_balancing" {
+  description = "Enable HTTP load balancing addon"
+  default     = false
+}
+
+variable "kubernetes_dashboard" {
+  description = "Enable kubernetes dashboard addon"
+  default     = true
+}
+
+variable "network_policy" {
+  description = "Enable network policy addon"
+  default     = false
+}
+
+variable "maintenance_start_time" {
+  description = "Time window specified for daily maintenance operations in RFC3339 format"
+  default     = "05:00"
+}
+
+variable "ip_range_pods" {
+  description = "The secondary ip range to use for pods"
+}
+
+variable "ip_range_services" {
+  description = "The secondary ip range to use for services"
+}
+
+variable "node_service_account" {
+  description = "Service account to associate to the nodes. Defaults to the compute default service account on the project."
+  default     = ""
+}
+
+variable "node_pools" {
+  type        = "list"
+  description = "List of maps containing node pools"
+
+  default = [
+    {
+      name = "default-node-pool"
+    },
+  ]
+}
+
+variable "node_pools_labels" {
+  type        = "map"
+  description = "Map of maps containing node labels by node-pool name"
+
+  default = {
+    all               = {}
+    default-node-pool = {}
+  }
+}
+
+variable "node_pools_taints" {
+  type        = "map"
+  description = "Map of lists containing node taints by node-pool name"
+
+  default = {
+    all               = []
+    default-node-pool = []
+  }
+}
+
+variable "node_pools_tags" {
+  type        = "map"
+  description = "Map of lists containing node network tags by node-pool name"
+
+  default = {
+    all               = []
+    default-node-pool = []
+  }
+}
+
+variable "stub_domains" {
+  type        = "map"
+  description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server"
+  default     = {}
+}
+
+variable "ip_masq_config_enabled" {
+  description = "Enable the IP Masquerade Agent configmap. Automatically enabled if network_policy is true."
+  default     = false
+}
+
+variable "ip_masq_non_masquerade_cidrs" {
+  type        = "list"
+  description = "List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading."
+  default     = ["10.0.0.0/8"]
+}
+
+variable "ip_masq_resync_interval" {
+  description = "The interval at which the agent attempts to sync its ConfigMap file from the disk."
+  default     = "60s"
+}
+
+variable "ip_masq_link_local" {
+  description = "Whether to masquerade traffic to the link-local prefix (169.254.0.0/16)."
+  default     = "false"
+}
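+
+# Illustrative sketch only (not part of the module interface): a caller might
+# combine the DNS and masquerade variables above like this. The domain names
+# and resolver IPs are placeholders.
+#
+#   stub_domains = {
+#     "example.com" = [
+#       "10.254.154.11",
+#     ]
+#   }
+#
+#   ip_masq_non_masquerade_cidrs = [
+#     "10.0.0.0/8",
+#     "192.168.20.0/24",
+#   ]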