Skip to content

Commit

Permalink
Address comments of the PR #325
Browse files Browse the repository at this point in the history
This commit addresses the comments of PR #325.

Signed-off-by: Ashish Ranjan <[email protected]>
  • Loading branch information
ashishranjan738 committed Dec 1, 2022
1 parent c4d9364 commit 70f02b9
Show file tree
Hide file tree
Showing 5 changed files with 28 additions and 12 deletions.
1 change: 1 addition & 0 deletions tests/pipelines/eks/awscli-cl2-load-with-addons.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ spec:
- name: slack-hook
- name: slack-message
- name: amp-workspace-id
default: ""
tasks:
- name: slack-notification
params:
Expand Down
1 change: 1 addition & 0 deletions tests/pipelines/eks/awscli-eks-cl2-load.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ spec:
- name: kubernetes-version
default: "1.23"
- name: amp-workspace-id
default: ""
tasks:
- name: slack-notification
params:
Expand Down
3 changes: 2 additions & 1 deletion tests/pipelines/eks/upstream-load.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,8 @@ spec:
- name: results-bucket
description: "Results bucket with path of s3 to upload results"
- name: amp-workspace-id
description: The remote amp workspace id where remote needs to happen.
description: The AMP workspace ID where remote write needs to happen.
default: ""
tasks:
- name: create-eks-cluster
taskRef:
Expand Down
13 changes: 10 additions & 3 deletions tests/tasks/generators/clusterloader/load.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,8 @@ spec:
- name: cluster-name
description: The name of the EKS cluster you want to spin.
- name: amp-workspace-id
description: The remote amp workspace id where remote needs to happen.
description: The AMP workspace ID where remote write needs to happen.
default: ""
results:
- name: datapoint
description: Stores the CL2 result that can be consumed by other tasks (e.g. cloudwatch)
Expand Down Expand Up @@ -76,7 +77,9 @@ spec:
EOL
cat $(workspaces.source.path)/overrides.yaml
cp $(workspaces.source.path)/overrides.yaml $(workspaces.results.path)/overrides.yaml
# Enable Prometheus if the remote workspace id is provided
if [ -n "$(params.amp-workspace-id)" ]; then
# TODO: Currently patching the prometheus manifests for remote write. Move this to upstream going forward.
echo "volumeBindingMode: WaitForFirstConsumer" >> $(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/0ssd-storage-class.yaml
cat $(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/0ssd-storage-class.yaml
Expand All @@ -100,6 +103,7 @@ spec:
- url: http://localhost:8005/workspaces/$(params.amp-workspace-id)/api/v1/remote_write
EOF
cat $(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/prometheus-prometheus.yaml
fi
- name: run-loadtest
image: public.ecr.aws/kit/clusterloader2:0213bea
onError: continue
Expand All @@ -109,9 +113,12 @@ spec:
if [ -n "$(params.endpoint)" ]; then
ENDPOINT_FLAG="--endpoint $(params.endpoint)"
fi
if [ -n "$(params.amp-workspace-id)" ]; then
CL2_PROMETHEUS_FLAGS="--enable-prometheus-server=true --prometheus-storage-class-provisioner kubernetes.io/aws-ebs --prometheus-storage-class-volume-type gp2 --prometheus-manifest-path=$(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/"
fi
aws eks $ENDPOINT_FLAG update-kubeconfig --name $(params.cluster-name) --region $(params.region)
cat $(workspaces.source.path)/perf-tests/clusterloader2/testing/load/config.yaml
ENABLE_EXEC_SERVICE=false /clusterloader --kubeconfig=/root/.kube/config --testconfig=$(workspaces.source.path)/perf-tests/clusterloader2/testing/load/config.yaml --testoverrides=$(workspaces.source.path)/overrides.yaml --nodes=$(params.nodes) --provider=eks --report-dir=$(workspaces.results.path) --alsologtostderr --v=2
ENABLE_EXEC_SERVICE=false /clusterloader --kubeconfig=/root/.kube/config --testconfig=$(workspaces.source.path)/perf-tests/clusterloader2/testing/load/config.yaml --testoverrides=$(workspaces.source.path)/overrides.yaml --nodes=$(params.nodes) --provider=eks --report-dir=$(workspaces.results.path) --alsologtostderr --v=2 $CL2_PROMETHEUS_FLAGS
if [ $? -eq 0 ]; then
echo "1" | tee $(results.datapoint.path)
else
Expand Down
22 changes: 14 additions & 8 deletions tests/tasks/setup/eks/awscli-mng.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -49,21 +49,27 @@ spec:
--query cluster.resourcesVpcConfig.subnetIds --output text \
)
nodes=$(params.desired-nodes)
nodes=$(($(params.desired-nodes)-1))
asgs=$((nodes/1000))
asg_name=$(params.cluster-name)-nodes
create_dp()
{
CREATED_NODEGROUP=$(aws eks $ENDPOINT_FLAG --region $(params.region) list-nodegroups --cluster-name $(params.cluster-name) --query 'nodegroups[?@==`'$asg_name-$1'`]' --output text)
#ToDo: parameterize instance-types
echo $3
EC2_INSTANCES=$3
echo $EC2_INSTANCES
if [ "$EC2_INSTANCES" == "" ]; then
# Defaulting to the instance types below if none have been provided via the parameter.
EC2_INSTANCES="c5.large m5.large r5.large t3.large t3a.large c5a.large m5a.large r5a.large"
fi
if [ "$CREATED_NODEGROUP" == "" ]; then
#create node group
aws eks $ENDPOINT_FLAG create-nodegroup \
--cluster-name $(params.cluster-name) \
--nodegroup-name $asg_name-$1 \
--node-role $(params.host-cluster-node-role-arn) \
--region $(params.region) \
--instance-types $3 \
--instance-types $EC2_INSTANCES \
--scaling-config minSize=$(params.min-nodes),maxSize=$2,desiredSize=$2 \
--subnets $NG_SUBNETS
fi
Expand All @@ -77,15 +83,15 @@ spec:
for i in $(seq 1 $asgs)
do
#max number of nodes MNG allows per ASG
create_dp $i 1000 c5.large,m5.large,r5.large,t3.large,t3a.large,c5a.large,m5a.large,r5a.large
create_dp $i 1000
done
remaining_nodes=$((nodes%1000))
remaining_nodes=$(((nodes)%1000))
if [[ $remaining_nodes -gt 0 ]]
then
echo "The remaining_nodes var is greater than 0."
create_dp 0 $remaining_nodes c5.large,m5.large,r5.large,t3.large,t3a.large,c5a.large,m5a.large,r5a.large
create_dp 0 $remaining_nodes
fi
# Creating an extra asg with 1 node for prometheus server pod scheduling.
# Creating an extra asg with 1 large node to ensure prometheus server has a scheduling space.
create_dp $(($asgs + 1)) 1 m5.4xlarge
- name: validate-nodes
image: alpine/k8s:1.22.6
Expand All @@ -104,6 +110,6 @@ spec:
while true; do
ready_node=$(kubectl get nodes 2>/dev/null | grep -w Ready | wc -l)
echo "ready-nodes=$ready_node"
if [[ "$ready_node" -eq $(($(params.desired-nodes) + 1)) ]]; then break; fi
if [[ "$ready_node" -eq $(params.desired-nodes) ]]; then break; fi
sleep 5
done

0 comments on commit 70f02b9

Please sign in to comment.