From b370f00899e58ba74edee39d03ae7cfadeb60664 Mon Sep 17 00:00:00 2001
From: Peter Hunt
Date: Fri, 22 Nov 2024 17:30:31 -0500
Subject: [PATCH] add min kubelet version tests

Signed-off-by: Peter Hunt
---
 test/extended/include.go                      |   1 +
 test/extended/node/minimum_kubelet_version.go | 265 ++++++++++++++++++
 .../generated/zz_generated.annotations.go     |  14 +
 zz_generated.manifests/test-reporting.yaml    |  16 ++
 4 files changed, 296 insertions(+)
 create mode 100644 test/extended/node/minimum_kubelet_version.go

diff --git a/test/extended/include.go b/test/extended/include.go
index fd750e5288c0..0b3c14065f67 100644
--- a/test/extended/include.go
+++ b/test/extended/include.go
@@ -39,6 +39,7 @@ import (
 	_ "github.com/openshift/origin/test/extended/machine_config"
 	_ "github.com/openshift/origin/test/extended/machines"
 	_ "github.com/openshift/origin/test/extended/networking"
+	_ "github.com/openshift/origin/test/extended/node"
 	_ "github.com/openshift/origin/test/extended/node_tuning"
 	_ "github.com/openshift/origin/test/extended/oauth"
 	_ "github.com/openshift/origin/test/extended/olm"
diff --git a/test/extended/node/minimum_kubelet_version.go b/test/extended/node/minimum_kubelet_version.go
new file mode 100644
index 000000000000..302f55cce67f
--- /dev/null
+++ b/test/extended/node/minimum_kubelet_version.go
@@ -0,0 +1,265 @@
+package node
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	operatorv1client "github.com/openshift/client-go/operator/clientset/versioned"
+	"github.com/openshift/origin/pkg/test/ginkgo/result"
+	exutil "github.com/openshift/origin/test/extended/util"
+	authorizationv1 "k8s.io/api/authorization/v1"
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/kubernetes/test/e2e/framework"
+)
+
+const (
+	nodesGroup          = "system:nodes"
+	nodeNamePrefix      = "system:node:"
+	desiredTestDuration = 25 * time.Minute
+)
+
+var _ = g.Describe("[sig-node][OCPFeatureGate:MinimumKubeletVersion] admission", func() {
+	defer g.GinkgoRecover()
+
+	oc := exutil.NewCLIWithoutNamespace("minimum-kubelet-version")
+
+	g.DescribeTable("admission", func(version string, expectedErr bool) {
+		defer updateMinimumKubeletVersion(oc, version, expectedErr)()
+	},
+		g.Entry("should allow an empty minimum kubelet version", "", false),
+		g.Entry("should allow an old minimum kubelet version", "1.30.0", false),
+		g.Entry("should not allow with a new minimum kubelet version", "1.100.0", true),
+	)
+})
+
+type minimumKubeletVersionAuthTestCase struct {
+	testName       string
+	kubeletVersion string
+	testFunc       func()
+}
+
+var _ = g.Describe("[sig-node][OCPFeatureGate:MinimumKubeletVersion] [Serial]", func() {
+	defer g.GinkgoRecover()
+
+	var (
+		nodeImpersonatingClient clientset.Interface
+		nodeName                = "fakenode"
+		asUser                  = nodeNamePrefix + nodeName
+		minimumVersion          = "1.30.0"
+		f                       = framework.NewDefaultFramework("minimum-kubelet-version")
+		oc                      = exutil.NewCLIWithoutNamespace("minimum-kubelet-version")
+	)
+
+	g.BeforeEach(func() {
+		g.DeferCleanup(updateMinimumKubeletVersionAndWait(oc, minimumVersion))
+
+		ginkgo.By("Creating a kubernetes client that impersonates a node")
+		config, err := framework.LoadConfig()
+		framework.ExpectNoError(err, "failed to load kubernetes client config")
+		config.Impersonate = restclient.ImpersonationConfig{
+			UserName: asUser,
+			Groups:   []string{nodesGroup},
+		}
+		nodeImpersonatingClient, err = clientset.NewForConfig(config)
+		framework.ExpectNoError(err, "failed to create Clientset for the given config: %+v", *config)
+	})
+
+	g.It("authorization", func() {
+		testCases := []minimumKubeletVersionAuthTestCase{
+			{
+				testName:       "should be able to list pods if new enough",
+				kubeletVersion: "v1.30.0",
+ testFunc: func() { + _, err := nodeImpersonatingClient.CoreV1().Pods(f.Namespace.Name).List(context.Background(), metav1.ListOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + }, + }, + { + testName: "should be able to get node", + kubeletVersion: "v1.29.0", + testFunc: func() { + _, err := nodeImpersonatingClient.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + }, + }, + { + testName: "should be able to perform subjectaccessreviews", + kubeletVersion: "v1.29.0", + testFunc: func() { + sar := &authorizationv1.SubjectAccessReview{ + Spec: authorizationv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Verb: "list", + Resource: "configmaps", + Namespace: f.Namespace.Name, + Version: "v1", + }, + User: asUser, + Groups: []string{nodesGroup}, + }, + } + + _, err := nodeImpersonatingClient.AuthorizationV1().SubjectAccessReviews().Create(context.Background(), sar, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + }, + }, + { + testName: "should block node from listing pods if too old", + kubeletVersion: "v1.29.0", + testFunc: func() { + _, err := nodeImpersonatingClient.CoreV1().Pods(f.Namespace.Name).List(context.Background(), metav1.ListOptions{}) + o.Expect(err).To(o.HaveOccurred()) + }, + }, + } + // Do this sequentially instead of with a fancier mechanism like g.DescribeTable so we don't + // need to fuss with BeforeSuite/AfterSuite business with rolling out a new apiserver + for _, tc := range testCases { + runMinimumKubeletVersionAuthTest(&tc, nodeName, asUser, nodeImpersonatingClient, f) + } + }) +}) + +// runMinimumKubeletVersionAuthTest runs a test. It's done in a separate function to make cleaning up the created node less messy. 
+func runMinimumKubeletVersionAuthTest(tc *minimumKubeletVersionAuthTestCase, nodeName, asUser string, nodeImpersonatingClient clientset.Interface, f *framework.Framework) { + framework.Logf("authorization %s", tc.testName) + node := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + TypeMeta: metav1.TypeMeta{ + Kind: "Node", + APIVersion: "v1", + }, + Status: v1.NodeStatus{ + NodeInfo: v1.NodeSystemInfo{ + KubeletVersion: tc.kubeletVersion, + }, + }, + } + ginkgo.By(fmt.Sprintf("Create node %s by user: %v", nodeName, asUser)) + _, err := nodeImpersonatingClient.CoreV1().Nodes().Create(context.Background(), node, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + // Make a new scope so we are sure to cleanup even if the test function fails + defer func() { + if err := f.ClientSet.CoreV1().Nodes().Delete(context.Background(), node.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + o.Expect(err).NotTo(o.HaveOccurred()) + } + }() + + tc.testFunc() +} + +func updateMinimumKubeletVersionAndWait(oc *exutil.CLI, version string) func() { + ginkgo.By("Updating minimum kubelet version to " + version) + operatorClient := oc.AdminOperatorClient() + + kasStatus, err := operatorClient.OperatorV1().KubeAPIServers().Get(context.Background(), "cluster", metav1.GetOptions{}) + framework.ExpectNoError(err) + + undoFunc := updateMinimumKubeletVersion(oc, version, false) + + // and wait for it to rollout + waitForAPIServerRollout(kasStatus.Status.LatestAvailableRevision, operatorClient) + return func() { + ginkgo.By("Reverting minimum kubelet version to \"\"") + newKasStatus, err := operatorClient.OperatorV1().KubeAPIServers().Get(context.Background(), "cluster", metav1.GetOptions{}) + framework.ExpectNoError(err) + + undoFunc() + + waitForAPIServerRollout(newKasStatus.Status.LatestAvailableRevision, operatorClient) + } +} + +func updateMinimumKubeletVersion(oc *exutil.CLI, version string, expectedErr bool) func() { + nodesConfigOrig, 
err := oc.AdminConfigClient().ConfigV1().Nodes().Get(context.Background(), "cluster", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + nodesConfig := nodesConfigOrig.DeepCopy() + nodesConfig.Spec.MinimumKubeletVersion = version + _, err = oc.AdminConfigClient().ConfigV1().Nodes().Update(context.Background(), nodesConfig, metav1.UpdateOptions{}) + if expectedErr { + o.Expect(err).To(o.HaveOccurred()) + } else { + o.Expect(err).NotTo(o.HaveOccurred()) + } + return func() { + nodesConfigCurrent, err := oc.AdminConfigClient().ConfigV1().Nodes().Get(context.Background(), "cluster", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + nodesConfigCurrent.Spec = *nodesConfigOrig.Spec.DeepCopy() + + _, err = oc.AdminConfigClient().ConfigV1().Nodes().Update(context.Background(), nodesConfigCurrent, metav1.UpdateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + } +} + +func waitForAPIServerRollout(previousLatestRevision int32, operatorClient operatorv1client.Interface) { + ctx := context.Background() + // separate context so we exit our loop, but it is still possible to use the main context for client calls + shouldEndTestCtx, shouldEndCancelFn := context.WithTimeout(ctx, desiredTestDuration) + defer shouldEndCancelFn() + + errs := []error{} + flakes := []error{} + // ensure the kube-apiserver operator is stable + nextLogTime := time.Now().Add(time.Minute) + for { + // prevent hot loops, the extra delay doesn't really matter + time.Sleep(10 * time.Second) + if shouldEndTestCtx.Err() != nil { + break + } + + // this may actually be flaky if the kube-apiserver is rolling out badly. Keep track of failures so we can + // fail the run, but don't exit the test here. 
+ kasStatus, err := operatorClient.OperatorV1().KubeAPIServers().Get(ctx, "cluster", metav1.GetOptions{}) + if err != nil { + reportedErr := fmt.Errorf("failed reading clusteroperator, time=%v, err=%w", time.Now(), err) + if strings.Contains(err.Error(), "http2: client connection lost") { + flakes = append(flakes, reportedErr) + continue + } + errs = append(errs, reportedErr) + continue + } + + // check to see that every node is at the latest revision + latestRevision := kasStatus.Status.LatestAvailableRevision + if latestRevision <= previousLatestRevision { + framework.Logf("kube-apiserver still has not observed rollout: previousLatestRevision=%d, latestRevision=%d", previousLatestRevision, latestRevision) + continue + } + + nodeNotAtRevisionReasons := []string{} + for _, nodeStatus := range kasStatus.Status.NodeStatuses { + if nodeStatus.CurrentRevision != latestRevision { + nodeNotAtRevisionReasons = append(nodeNotAtRevisionReasons, fmt.Sprintf("node/%v is at revision %d, not %d", nodeStatus.NodeName, nodeStatus.CurrentRevision, latestRevision)) + } + } + if len(nodeNotAtRevisionReasons) == 0 { + break + } + if time.Now().After(nextLogTime) { + framework.Logf("kube-apiserver still not stable after rollout: %v", strings.Join(nodeNotAtRevisionReasons, "; ")) + nextLogTime = time.Now().Add(time.Minute) + } + } + + if len(errs) > 0 { + framework.ExpectNoError(errors.Join(errs...)) + } + if len(flakes) > 0 { + result.Flakef("errors that will eventually be failures: %v", errors.Join(flakes...)) + } +} diff --git a/test/extended/util/annotate/generated/zz_generated.annotations.go b/test/extended/util/annotate/generated/zz_generated.annotations.go index d5b95cc582d0..3fb2c12ef8f1 100644 --- a/test/extended/util/annotate/generated/zz_generated.annotations.go +++ b/test/extended/util/annotate/generated/zz_generated.annotations.go @@ -1679,6 +1679,20 @@ var Annotations = map[string]string{ "[sig-node][Late] should not have pod creation failures due to systemd timeouts": " 
[Suite:openshift/conformance/parallel]", + "[sig-node][OCPFeatureGate:MinimumKubeletVersion] [Serial] authorization should be able to get node": " [Suite:openshift/conformance/serial]", + + "[sig-node][OCPFeatureGate:MinimumKubeletVersion] [Serial] authorization should be able to list pods if new enough": " [Suite:openshift/conformance/serial]", + + "[sig-node][OCPFeatureGate:MinimumKubeletVersion] [Serial] authorization should be able to perform subjectaccessreviews": " [Suite:openshift/conformance/serial]", + + "[sig-node][OCPFeatureGate:MinimumKubeletVersion] [Serial] authorization should block node from listing pods if too old": " [Suite:openshift/conformance/serial]", + + "[sig-node][OCPFeatureGate:MinimumKubeletVersion] admission admission should allow an empty minimum kubelet version": " [Suite:openshift/conformance/parallel]", + + "[sig-node][OCPFeatureGate:MinimumKubeletVersion] admission admission should allow an old minimum kubelet version": " [Suite:openshift/conformance/parallel]", + + "[sig-node][OCPFeatureGate:MinimumKubeletVersion] admission admission should not allow with a new minimum kubelet version": " [Suite:openshift/conformance/parallel]", + "[sig-node][Suite:openshift/nodes/realtime/latency][Disruptive] Real time kernel should meet latency requirements when tested with cyclictest": " [Serial]", "[sig-node][Suite:openshift/nodes/realtime/latency][Disruptive] Real time kernel should meet latency requirements when tested with hwlatdetect": " [Serial]", diff --git a/zz_generated.manifests/test-reporting.yaml b/zz_generated.manifests/test-reporting.yaml index a79212e9a66f..b86564a46ce9 100644 --- a/zz_generated.manifests/test-reporting.yaml +++ b/zz_generated.manifests/test-reporting.yaml @@ -122,6 +122,22 @@ spec: - testName: '[sig-instrumentation][OCPFeatureGate:MetricsCollectionProfiles] The collection profiles feature-set initially, in a homogeneous default environment, should expose default metrics' + - featureGate: MinimumKubeletVersion + 
tests: + - testName: '[sig-node][OCPFeatureGate:MinimumKubeletVersion] [Serial] authorization + should be able to get node' + - testName: '[sig-node][OCPFeatureGate:MinimumKubeletVersion] [Serial] authorization + should be able to list pods if new enough' + - testName: '[sig-node][OCPFeatureGate:MinimumKubeletVersion] [Serial] authorization + should be able to perform subjectaccessreviews' + - testName: '[sig-node][OCPFeatureGate:MinimumKubeletVersion] [Serial] authorization + should block node from listing pods if too old' + - testName: '[sig-node][OCPFeatureGate:MinimumKubeletVersion] admission admission + should allow an empty minimum kubelet version' + - testName: '[sig-node][OCPFeatureGate:MinimumKubeletVersion] admission admission + should allow an old minimum kubelet version' + - testName: '[sig-node][OCPFeatureGate:MinimumKubeletVersion] admission admission + should not allow with a new minimum kubelet version' - featureGate: NetworkDiagnosticsConfig tests: - testName: '[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig][Serial] Should