
Commit

Testing other approach
LeoDiazL committed Dec 19, 2023
1 parent 807f6f2 commit a713489
Showing 1 changed file with 72 additions and 70 deletions.
operations/deployment/terraform/modules/aws/eks/aws_eks_cluster.tf
142 changes: 72 additions & 70 deletions
@@ -9,7 +9,7 @@ resource "aws_eks_cluster" "main" {
   vpc_config {
     security_group_ids      = [aws_security_group.eks_security_group_master.id]
     subnet_ids              = data.aws_subnets.public.ids
-    endpoint_private_access = false
+    endpoint_private_access = true
     endpoint_public_access  = true
   }

@@ -54,45 +54,45 @@ data "aws_eks_cluster_auth" "cluster_auth" {
   name = aws_eks_cluster.main.id
 }

resource "aws_launch_template" "main" {
network_interfaces {
associate_public_ip_address = true
security_groups = [aws_security_group.eks_security_group_worker.id]
}
#vpc_security_group_ids = [aws_security_group.eks_security_group_worker.id]
#iam_instance_profile {
# name = aws_iam_instance_profile.eks_inst_profile.name
#}
image_id = var.aws_eks_instance_ami_id != "" ? var.aws_eks_instance_ami_id : data.aws_ami.image_selected.id
instance_type = var.aws_eks_instance_type
name_prefix = "${var.aws_eks_environment}-eksworker"
user_data = base64encode(try(file("./aws_ec2_incoming_user_data_script.sh"), (var.aws_eks_instance_ami_id != "" ? local.node-userdata : "" )))
key_name = var.aws_eks_ec2_key_pair != "" ? var.aws_eks_ec2_key_pair : aws_key_pair.aws_key[0].id
update_default_version = true
monitoring {
enabled = true
}
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_tokens = "optional"
http_put_response_hop_limit = 1
instance_metadata_tags = "enabled"
}
#tag_specifications {
# resource_type = "instance"
# tags = {
# # "kubernetes.io/cluster/${var.aws_eks_cluster_name}" = "owned",
# "Name" = "${var.aws_resource_identifier}-${var.aws_eks_environment}-eksworker-node"
# }
#}
#tags = {
# "kubernetes.io/cluster/${var.aws_eks_cluster_name}" = "owned"
#}
depends_on = [aws_iam_role.iam_role_worker]
}
#resource "aws_launch_template" "main" {
# network_interfaces {
# associate_public_ip_address = true
# security_groups = [aws_security_group.eks_security_group_worker.id]
# }
# #vpc_security_group_ids = [aws_security_group.eks_security_group_worker.id]
# #iam_instance_profile {
# # name = aws_iam_instance_profile.eks_inst_profile.name
# #}
# image_id = var.aws_eks_instance_ami_id != "" ? var.aws_eks_instance_ami_id : data.aws_ami.image_selected.id
# instance_type = var.aws_eks_instance_type
# name_prefix = "${var.aws_eks_environment}-eksworker"
# user_data = base64encode(try(file("./aws_ec2_incoming_user_data_script.sh"), (var.aws_eks_instance_ami_id != "" ? local.node-userdata : "" )))
# key_name = var.aws_eks_ec2_key_pair != "" ? var.aws_eks_ec2_key_pair : aws_key_pair.aws_key[0].id
# update_default_version = true
# monitoring {
# enabled = true
# }
# lifecycle {
# create_before_destroy = true
# }
# metadata_options {
# http_endpoint = "enabled"
# http_tokens = "optional"
# http_put_response_hop_limit = 1
# instance_metadata_tags = "enabled"
# }
# #tag_specifications {
# # resource_type = "instance"
# # tags = {
# # # "kubernetes.io/cluster/${var.aws_eks_cluster_name}" = "owned",
# # "Name" = "${var.aws_resource_identifier}-${var.aws_eks_environment}-eksworker-node"
# # }
# #}
# #tags = {
# # "kubernetes.io/cluster/${var.aws_eks_cluster_name}" = "owned"
# #}
# depends_on = [aws_iam_role.iam_role_worker]
#}

data "aws_ami" "image_selected" {
most_recent = true
Expand All @@ -105,31 +105,31 @@ data "aws_ami" "image_selected" {
}


-locals {
-  #Userdata for nodes
-  node-userdata = <<USERDATA
-#!/bin/bash
-set -o xtrace
-# These are used to install SSM Agent to SSH into the EKS nodes.
-sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
-systemctl enable amazon-ssm-agent
-systemctl restart amazon-ssm-agent
-/etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.main.endpoint}' --b64-cluster-ca '${aws_eks_cluster.main.certificate_authority.0.data}' '${aws_eks_cluster.main.name}'
-# Retrieve the necessary packages for `mount` to work properly with NFSv4.1
-sudo yum update -y
-sudo yum install -y amazon-efs-utils nfs-utils nfs-utils-lib
-# after the eks bootstrap and necessary packages installation - restart kubelet
-systemctl restart kubelet.service
-# Take care of instance name by adding the launch order
-INSTANCE_NAME_TAG=$(curl -s http://169.254.169.254/latest/meta-data/tags/instance/Name)
-LOCAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
-# Get Instance MetaData
-REGION=$(curl -s http://169.254.169.254/latest/meta-data/placement/region)
-INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
-aws ec2 create-tags --region $REGION --resources $INSTANCE_ID --tags Key=Name,Value=$INSTANCE_NAME_TAG-$LOCAL_IP
-USERDATA
-}
-
+#locals {
+#  #Userdata for nodes
+#  node-userdata = <<USERDATA
+# #!/bin/bash
+# set -o xtrace
+# # These are used to install SSM Agent to SSH into the EKS nodes.
+# sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
+# systemctl enable amazon-ssm-agent
+# systemctl restart amazon-ssm-agent
+# /etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.main.endpoint}' --b64-cluster-ca '${aws_eks_cluster.main.certificate_authority.0.data}' '${aws_eks_cluster.main.name}'
+# # Retrieve the necessary packages for `mount` to work properly with NFSv4.1
+# sudo yum update -y
+# sudo yum install -y amazon-efs-utils nfs-utils nfs-utils-lib
+# # after the eks bootstrap and necessary packages installation - restart kubelet
+# systemctl restart kubelet.service
+# # Take care of instance name by adding the launch order
+# INSTANCE_NAME_TAG=$(curl -s http://169.254.169.254/latest/meta-data/tags/instance/Name)
+# LOCAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
+# # Get Instance MetaData
+# REGION=$(curl -s http://169.254.169.254/latest/meta-data/placement/region)
+# INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
+# aws ec2 create-tags --region $REGION --resources $INSTANCE_ID --tags Key=Name,Value=$INSTANCE_NAME_TAG-$LOCAL_IP
+# USERDATA
+#}
+#
#resource "aws_autoscaling_group" "main" {
# desired_capacity = var.aws_eks_desired_capacity
# launch_template {
@@ -172,16 +172,18 @@ resource "aws_eks_node_group" "worker_nodes" {
     max_unavailable = 1
   }

-  launch_template {
-    id      = aws_launch_template.main.id
-    version = "${aws_launch_template.main.latest_version}"
-  }
+  ami_type = "AL2_x86_64"
+
+  #launch_template {
+  #  id      = aws_launch_template.main.id
+  #  version = "${aws_launch_template.main.latest_version}"
+  #}

   depends_on = [
     aws_iam_role.iam_role_worker,
     aws_iam_role.iam_role_master,
     aws_eks_cluster.main,
-    aws_launch_template.main,
+    #aws_launch_template.main,
     aws_security_group.eks_security_group_master,
     aws_security_group.eks_security_group_worker
   ]
