Commit
Master for Cluster, worker for node - SG fixes
LeoDiazL committed Dec 20, 2023
1 parent 82cbc00 commit 9ebdda9
Showing 9 changed files with 370 additions and 236 deletions.
4 changes: 2 additions & 2 deletions README.md
@@ -465,8 +465,8 @@ The following inputs can be used as `step.with` keys
| Name | Type | Description |
|------------------|---------|------------------------------------|
| `aws_eks_create` | Boolean | Define if an EKS cluster should be created |
| `aws_eks_security_group_name_master` | String | Define the security group name master. Defaults to `SG for ${var.aws_resource_identifier} - EKS Master`. |
| `aws_eks_security_group_name_worker` | String | Define the security group name worker. Defaults to `SG for ${var.aws_resource_identifier} - EKS Worker`. |
| `aws_eks_security_group_name_cluster` | String | Define the security group name master. Defaults to `SG for ${var.aws_resource_identifier} - EKS Master`. |
| `aws_eks_security_group_name_node` | String | Define the security group name worker. Defaults to `SG for ${var.aws_resource_identifier} - EKS Worker`. |
| `aws_eks_environment` | String | Specify the eks environment name. Defaults to `env` |
| `aws_eks_management_cidr` | String | Comma separated list of remote public CIDRs blocks to add it to Worker nodes security groups. |
| `aws_eks_allowed_ports` | String | Allow incoming traffic from this port. Accepts comma separated values, matching 1 to 1 with `aws_eks_allowed_ports_cidr`. |
8 changes: 4 additions & 4 deletions action.yaml
@@ -905,10 +905,10 @@ inputs:
aws_eks_create:
description: 'Define if an EKS cluster should be created'
required: false
aws_eks_security_group_name_master:
aws_eks_security_group_name_cluster:
description: "SG for ${var.aws_resource_identifier} - ${var.aws_eks_environment} - EKS Master"
required: false
aws_eks_security_group_name_worker:
aws_eks_security_group_name_node:
description: "SG for ${var.aws_resource_identifier} - ${var.aws_eks_environment} - EKS Worker"
required: false
aws_eks_environment:
@@ -1409,8 +1409,8 @@ runs:

# AWS EKS
AWS_EKS_CREATE: ${{ inputs.aws_eks_create }}
AWS_EKS_SECURITY_GROUP_NAME_MASTER: ${{ inputs.aws_eks_security_group_name_master }}
AWS_EKS_SECURITY_GROUP_NAME_WORKER: ${{ inputs.aws_eks_security_group_name_worker }}
AWS_EKS_SECURITY_GROUP_NAME_CLUSTER: ${{ inputs.aws_eks_security_group_name_cluster }}
AWS_EKS_SECURITY_GROUP_NAME_NODE: ${{ inputs.aws_eks_security_group_name_node }}
AWS_EKS_ENVIRONMENT: ${{ inputs.aws_eks_environment }}
AWS_EKS_MANAGEMENT_CIDR: ${{ inputs.aws_eks_management_cidr }}
AWS_EKS_ALLOWED_PORTS: ${{ inputs.aws_eks_allowed_ports }}
8 changes: 4 additions & 4 deletions operations/_scripts/generate/generate_vars_terraform.sh
@@ -354,8 +354,8 @@ fi
#-- EKS Cluster --#
if [[ $(alpha_only "$AWS_EKS_CREATE") == true ]]; then
aws_eks_create=$(generate_var aws_eks_create $AWS_EKS_CREATE)
aws_eks_security_group_name_master=$(generate_var aws_eks_security_group_name_master $AWS_EKS_SECURITY_GROUP_NAME_MASTER)
aws_eks_security_group_name_worker=$(generate_var aws_eks_security_group_name_worker $AWS_EKS_SECURITY_GROUP_NAME_WORKER)
aws_eks_security_group_name_cluster=$(generate_var aws_eks_security_group_name_cluster $AWS_EKS_SECURITY_GROUP_NAME_CLUSTER)
aws_eks_security_group_name_node=$(generate_var aws_eks_security_group_name_node $AWS_EKS_SECURITY_GROUP_NAME_NODE)
aws_eks_environment=$(generate_var aws_eks_environment $AWS_EKS_ENVIRONMENT)
aws_eks_management_cidr=$(generate_var aws_eks_management_cidr $AWS_EKS_MANAGEMENT_CIDR)
aws_eks_allowed_ports=$(generate_var aws_eks_allowed_ports $AWS_EKS_ALLOWED_PORTS)
@@ -676,8 +676,8 @@ $aws_ecr_additional_tags
#-- EKS --#
$aws_eks_create
$aws_eks_security_group_name_master
$aws_eks_security_group_name_worker
$aws_eks_security_group_name_cluster
$aws_eks_security_group_name_node
$aws_eks_environment
$aws_eks_management_cidr
$aws_eks_allowed_ports
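For context, `generate_var` (defined earlier in this script and unchanged by this commit) appears to emit a `name = value` assignment for each input that is set, and those assignments are later written into the generated Terraform variables file. Under that assumption, and with purely illustrative values, the renamed inputs would come out roughly as:

  aws_eks_create                      = true
  aws_eks_security_group_name_cluster = "SG for my-app - EKS Cluster"  # illustrative value, not a repository default
  aws_eks_security_group_name_node    = "SG for my-app - EKS Node"     # illustrative value, not a repository default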
8 changes: 4 additions & 4 deletions operations/deployment/terraform/aws/aws_variables.tf
@@ -1542,14 +1542,14 @@ variable "aws_eks_create" {
default = false
}

variable "aws_eks_security_group_name_master" {
description = "aws aws_eks_security_group_name_master name"
variable "aws_eks_security_group_name_cluster" {
description = "aws aws_eks_security_group_name_cluster name"
type = string
default = ""
}

variable "aws_eks_security_group_name_worker" {
description = "aws aws_eks_security_group_name_worker name"
variable "aws_eks_security_group_name_node" {
description = "aws aws_eks_security_group_name_node name"
type = string
default = ""
}
4 changes: 2 additions & 2 deletions operations/deployment/terraform/aws/bitovi_main.tf
@@ -594,8 +594,8 @@ module "eks" {
count = var.aws_eks_create ? 1 : 0
# EKS
#aws_eks_create = var.aws_eks_create
aws_eks_security_group_name_master = var.aws_eks_security_group_name_master
aws_eks_security_group_name_worker = var.aws_eks_security_group_name_worker
aws_eks_security_group_name_cluster = var.aws_eks_security_group_name_cluster
aws_eks_security_group_name_node = var.aws_eks_security_group_name_node
aws_eks_environment = var.aws_eks_environment
aws_eks_management_cidr = var.aws_eks_management_cidr
aws_eks_allowed_ports = var.aws_eks_allowed_ports
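The renamed inputs are handed straight to the eks module here. The module's security group resources themselves are not part of this diff, but the aws_eks_cluster.tf changes below reference them as `eks_security_group_cluster` and `eks_security_group_node`. A minimal sketch of how the module might consume these variables, assuming it falls back to the default names documented in the README when they are left empty (the description strings and VPC data source are hypothetical):

  # Sketch only: resource names match the references in aws_eks_cluster.tf below,
  # but the bodies are assumptions, not code from this repository.
  resource "aws_security_group" "eks_security_group_cluster" {
    name        = var.aws_eks_security_group_name_cluster != "" ? var.aws_eks_security_group_name_cluster : "SG for ${var.aws_resource_identifier} - EKS Master"
    description = "EKS control plane (cluster) security group"  # hypothetical
    vpc_id      = data.aws_vpc.main.id                           # hypothetical data source
  }

  resource "aws_security_group" "eks_security_group_node" {
    name        = var.aws_eks_security_group_name_node != "" ? var.aws_eks_security_group_name_node : "SG for ${var.aws_resource_identifier} - EKS Worker"
    description = "EKS worker node security group"               # hypothetical
    vpc_id      = data.aws_vpc.main.id                           # hypothetical data source
  }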
251 changes: 130 additions & 121 deletions operations/deployment/terraform/modules/aws/eks/aws_eks_cluster.tf
@@ -11,19 +11,19 @@ locals {
resource "aws_eks_cluster" "main" {
name = var.aws_eks_cluster_name # Cluster name is defined during the code-generation phase
version = var.aws_eks_cluster_version
role_arn = aws_iam_role.iam_role_master.arn
role_arn = aws_iam_role.iam_role_cluster.arn
vpc_config {
security_group_ids = [aws_security_group.eks_security_group_master.id]
security_group_ids = [aws_security_group.eks_security_group_cluster.id]
subnet_ids = data.aws_subnets.public.ids
endpoint_private_access = true
endpoint_public_access = true
}

depends_on = [
aws_iam_role.iam_role_master,
aws_security_group.eks_security_group_master,
aws_iam_role_policy_attachment.managed_policies_master,
aws_iam_role_policy_attachment.managed_policies_worker
aws_iam_role.iam_role_cluster,
aws_security_group.eks_security_group_cluster,
aws_iam_role_policy_attachment.managed_policies_cluster,
aws_iam_role_policy_attachment.managed_policies_node
]
enabled_cluster_log_types = local.aws_eks_cluster_log_types

@@ -60,45 +60,45 @@ data "aws_eks_cluster_auth" "cluster_auth" {
name = aws_eks_cluster.main.id
}

resource "aws_launch_template" "main" {
network_interfaces {
associate_public_ip_address = true
security_groups = [aws_security_group.eks_security_group_worker.id]
}
#vpc_security_group_ids = [aws_security_group.eks_security_group_worker.id]
#iam_instance_profile {
# name = aws_iam_instance_profile.eks_inst_profile.name
#}
image_id = var.aws_eks_instance_ami_id != "" ? var.aws_eks_instance_ami_id : data.aws_ami.image_selected.id
instance_type = var.aws_eks_instance_type
name_prefix = "${var.aws_eks_environment}-eksworker"
user_data = base64encode(try(file("./aws_ec2_incoming_user_data_script.sh"), (var.aws_eks_instance_ami_id != "" ? local.node-userdata : "" )))
key_name = var.aws_eks_ec2_key_pair != "" ? var.aws_eks_ec2_key_pair : aws_key_pair.aws_key[0].id
update_default_version = true
monitoring {
enabled = true
}
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_tokens = "optional"
http_put_response_hop_limit = 1
instance_metadata_tags = "enabled"
}
#tag_specifications {
# resource_type = "instance"
# tags = {
# # "kubernetes.io/cluster/${var.aws_eks_cluster_name}" = "owned",
# "Name" = "${var.aws_resource_identifier}-${var.aws_eks_environment}-eksworker-node"
# }
#}
#tags = {
# "kubernetes.io/cluster/${var.aws_eks_cluster_name}" = "owned"
#}
depends_on = [aws_iam_role.iam_role_worker]
}
#resource "aws_launch_template" "main" {
# network_interfaces {
# associate_public_ip_address = true
# security_groups = [aws_security_group.eks_security_group_node.id]
# }
# #vpc_security_group_ids = [aws_security_group.eks_security_group_node.id]
# #iam_instance_profile {
# # name = aws_iam_instance_profile.eks_inst_profile.name
# #}
# image_id = var.aws_eks_instance_ami_id != "" ? var.aws_eks_instance_ami_id : data.aws_ami.image_selected.id
# instance_type = var.aws_eks_instance_type
# name_prefix = "${var.aws_eks_environment}-eksnode"
# user_data = base64encode(try(file("./aws_ec2_incoming_user_data_script.sh"), (var.aws_eks_instance_ami_id != "" ? local.node-userdata : "" )))
# key_name = var.aws_eks_ec2_key_pair != "" ? var.aws_eks_ec2_key_pair : aws_key_pair.aws_key[0].id
# update_default_version = true
# monitoring {
# enabled = true
# }
# lifecycle {
# create_before_destroy = true
# }
# metadata_options {
# http_endpoint = "enabled"
# http_tokens = "optional"
# http_put_response_hop_limit = 1
# instance_metadata_tags = "enabled"
# }
# #tag_specifications {
# # resource_type = "instance"
# # tags = {
# # # "kubernetes.io/cluster/${var.aws_eks_cluster_name}" = "owned",
# # "Name" = "${var.aws_resource_identifier}-${var.aws_eks_environment}-eksnode-node"
# # }
# #}
# #tags = {
# # "kubernetes.io/cluster/${var.aws_eks_cluster_name}" = "owned"
# #}
# depends_on = [aws_iam_role.iam_role_node]
#}

data "aws_ami" "image_selected" {
most_recent = true
@@ -111,90 +111,99 @@ data "aws_ami" "image_selected" {
}


locals {
#Userdata for nodes
node-userdata = <<USERDATA
#!/bin/bash
set -o xtrace
# These are used to install SSM Agent to SSH into the EKS nodes.
sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
systemctl enable amazon-ssm-agent
systemctl restart amazon-ssm-agent
/etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.main.endpoint}' --b64-cluster-ca '${aws_eks_cluster.main.certificate_authority.0.data}' '${aws_eks_cluster.main.name}'
# Retrieve the necessary packages for `mount` to work properly with NFSv4.1
sudo yum update -y
sudo yum install -y amazon-efs-utils nfs-utils nfs-utils-lib
# after the eks bootstrap and necessary packages installation - restart kubelet
systemctl restart kubelet.service
# Take care of instance name by adding the launch order
INSTANCE_NAME_TAG=$(curl -s http://169.254.169.254/latest/meta-data/tags/instance/Name)
LOCAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
# Get Instance MetaData
REGION=$(curl -s http://169.254.169.254/latest/meta-data/placement/region)
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
aws ec2 create-tags --region $REGION --resources $INSTANCE_ID --tags Key=Name,Value=$INSTANCE_NAME_TAG-$LOCAL_IP
USERDATA
}

resource "aws_autoscaling_group" "main" {
desired_capacity = var.aws_eks_desired_capacity
launch_template {
id = aws_launch_template.main.id
version = "${aws_launch_template.main.latest_version}"
}
max_size = var.aws_eks_max_size
min_size = var.aws_eks_min_size
name = "${var.aws_resource_identifier}-${var.aws_eks_environment}-eksworker-asg"
vpc_zone_identifier = data.aws_subnets.private.ids
health_check_type = "EC2"

tag {
key = "Name"
value = "${var.aws_resource_identifier}-${var.aws_eks_environment}-eksworker-node"
propagate_at_launch = true
}

depends_on = [
aws_iam_role.iam_role_master,
aws_iam_role.iam_role_worker,
aws_security_group.eks_security_group_master,
aws_security_group.eks_security_group_worker
]
}

#resource "aws_eks_node_group" "worker_nodes" {
# cluster_name = aws_eks_cluster.main.name
# node_group_name = "${var.aws_resource_identifier}-ng"
# node_role_arn = aws_iam_role.iam_role_worker.arn
# subnet_ids = data.aws_subnets.private.ids
#
# scaling_config {
# desired_size = var.aws_eks_desired_capacity
# max_size = var.aws_eks_max_size
# min_size = var.aws_eks_min_size
# }
#locals {
# #Userdata for nodes
# node-userdata = <<USERDATA
# #!/bin/bash
# set -o xtrace
# # These are used to install SSM Agent to SSH into the EKS nodes.
# sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
# systemctl enable amazon-ssm-agent
# systemctl restart amazon-ssm-agent
# /etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.main.endpoint}' --b64-cluster-ca '${aws_eks_cluster.main.certificate_authority.0.data}' '${aws_eks_cluster.main.name}'
# # Retrieve the necessary packages for `mount` to work properly with NFSv4.1
# sudo yum update -y
# sudo yum install -y amazon-efs-utils nfs-utils nfs-utils-lib
# # after the eks bootstrap and necessary packages installation - restart kubelet
# systemctl restart kubelet.service
# # Take care of instance name by adding the launch order
# INSTANCE_NAME_TAG=$(curl -s http://169.254.169.254/latest/meta-data/tags/instance/Name)
# LOCAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
# # Get Instance MetaData
# REGION=$(curl -s http://169.254.169.254/latest/meta-data/placement/region)
# INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
# aws ec2 create-tags --region $REGION --resources $INSTANCE_ID --tags Key=Name,Value=$INSTANCE_NAME_TAG-$LOCAL_IP
# USERDATA
#}
#
# update_config {
# max_unavailable = 1
#resource "aws_autoscaling_group" "main" {
# desired_capacity = var.aws_eks_desired_capacity
# launch_template {
# id = aws_launch_template.main.id
# version = "${aws_launch_template.main.latest_version}"
# }
# max_size = var.aws_eks_max_size
# min_size = var.aws_eks_min_size
# name = "${var.aws_resource_identifier}-${var.aws_eks_environment}-eksnode-asg"
# vpc_zone_identifier = data.aws_subnets.private.ids
# health_check_type = "EC2"
#
# ami_type = "AL2_x86_64"
#
# #launch_template {
# # id = aws_launch_template.main.id
# # version = "${aws_launch_template.main.latest_version}"
# #}
#tag {
# key = "Name"
# value = "${var.aws_resource_identifier}-${var.aws_eks_environment}-eksnode-node"
# propagate_at_launch = true
#}
#
# depends_on = [
# aws_iam_role.iam_role_worker,
# aws_iam_role.iam_role_master,
# aws_eks_cluster.main,
# #aws_launch_template.main,
# aws_security_group.eks_security_group_master,
# aws_security_group.eks_security_group_worker
# aws_iam_role.iam_role_cluster,
# aws_iam_role.iam_role_node,
# aws_security_group.eks_security_group_cluster,
# aws_security_group.eks_security_group_node
# ]
#}

resource "aws_eks_node_group" "node_nodes" {
cluster_name = aws_eks_cluster.main.name
node_group_name = "${var.aws_resource_identifier}-ng"
node_role_arn = aws_iam_role.iam_role_node.arn
subnet_ids = data.aws_subnets.private.ids

scaling_config {
desired_size = var.aws_eks_desired_capacity
max_size = var.aws_eks_max_size
min_size = var.aws_eks_min_size
}

update_config {
max_unavailable = 1
}

ami_type = "AL2_x86_64"

capacity_type = "ON_DEMAND"
disk_size = 20
instance_types = ["t3.medium"]
#instance_types = [var.aws_eks_instance_type]


remote_access {
ec2_ssh_key = aws_key_pair.aws_key.name
}
#launch_template {
# id = aws_launch_template.main.id
# version = "${aws_launch_template.main.latest_version}"
#}

depends_on = [
aws_iam_role.iam_role_node,
aws_iam_role.iam_role_cluster,
aws_eks_cluster.main,
#aws_launch_template.main,
aws_security_group.eks_security_group_cluster,
aws_security_group.eks_security_group_node
]
}

output "aws_eks_cluster_name" {
value = aws_eks_cluster.main.name
}
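The cluster and node group above depend on `aws_iam_role.iam_role_cluster`, `aws_iam_role.iam_role_node` and the matching `managed_policies_cluster` / `managed_policies_node` attachments, whose definitions are outside this diff. As a rough sketch of what those renamed resources typically contain for an EKS control plane and a managed node group (the role names and exact policy wiring are assumptions; only the AWS managed policy ARNs are standard):

  # Sketch only: resource names match the depends_on references above; bodies are assumed.
  resource "aws_iam_role" "iam_role_cluster" {
    name = "${var.aws_resource_identifier}-eks-cluster-role"  # naming convention assumed
    assume_role_policy = jsonencode({
      Version = "2012-10-17"
      Statement = [{
        Effect    = "Allow"
        Action    = "sts:AssumeRole"
        Principal = { Service = "eks.amazonaws.com" }
      }]
    })
  }

  resource "aws_iam_role_policy_attachment" "managed_policies_cluster" {
    role       = aws_iam_role.iam_role_cluster.name
    policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
  }

  resource "aws_iam_role" "iam_role_node" {
    name = "${var.aws_resource_identifier}-eks-node-role"     # naming convention assumed
    assume_role_policy = jsonencode({
      Version = "2012-10-17"
      Statement = [{
        Effect    = "Allow"
        Action    = "sts:AssumeRole"
        Principal = { Service = "ec2.amazonaws.com" }
      }]
    })
  }

  resource "aws_iam_role_policy_attachment" "managed_policies_node" {
    for_each = toset([
      "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
      "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
      "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
    ])
    role       = aws_iam_role.iam_role_node.name
    policy_arn = each.value
  }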