Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

extended: add minimum kubelet version test #29353

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ require (
github.com/onsi/ginkgo/v2 v2.20.2
github.com/onsi/gomega v1.34.2
github.com/opencontainers/go-digest v1.0.0
github.com/openshift/api v0.0.0-20241001152557-e415140e5d5f
github.com/openshift/api v0.0.0-20241128230347-e456392b2cb4
github.com/openshift/apiserver-library-go v0.0.0-20241001175710-6064b62894a6
github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660
github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f
Expand Down
4 changes: 2 additions & 2 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -670,8 +670,8 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 h1:R
github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/openshift/api v0.0.0-20241001152557-e415140e5d5f h1:ya1OmyZm3LIIxI3U9VE9Nyx3ehCHgBwxyFUPflYPWls=
github.com/openshift/api v0.0.0-20241001152557-e415140e5d5f/go.mod h1:Shkl4HanLwDiiBzakv+con/aMGnVE2MAGvoKp5oyYUo=
github.com/openshift/api v0.0.0-20241128230347-e456392b2cb4 h1:k0v6SS4fSCw73XkS6gNDfZdRqh+r7gJ81Haa+5Ab0VM=
github.com/openshift/api v0.0.0-20241128230347-e456392b2cb4/go.mod h1:Shkl4HanLwDiiBzakv+con/aMGnVE2MAGvoKp5oyYUo=
github.com/openshift/apiserver-library-go v0.0.0-20241001175710-6064b62894a6 h1:Wban+ggY6sbg611SQSOeavUeug2cRJGz0rEeXxTxIH0=
github.com/openshift/apiserver-library-go v0.0.0-20241001175710-6064b62894a6/go.mod h1:9Anrq7+DZmmw1Brchx4zmh26hAZbe6Dv7bGXRclnhYI=
github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660 h1:F0zE2bmdVvaEd18VXuGYQdJJ1FYJu4MIDW9PYZWc9No=
Expand Down
1 change: 1 addition & 0 deletions test/extended/include.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ import (
_ "github.com/openshift/origin/test/extended/machine_config"
_ "github.com/openshift/origin/test/extended/machines"
_ "github.com/openshift/origin/test/extended/networking"
_ "github.com/openshift/origin/test/extended/node"
_ "github.com/openshift/origin/test/extended/node_tuning"
_ "github.com/openshift/origin/test/extended/oauth"
_ "github.com/openshift/origin/test/extended/olm"
Expand Down
266 changes: 266 additions & 0 deletions test/extended/node/minimum_kubelet_version.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,266 @@
package node

import (
"context"
"errors"
"fmt"
"strings"
"time"

"github.com/onsi/ginkgo/v2"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
operatorv1client "github.com/openshift/client-go/operator/clientset/versioned"
"github.com/openshift/origin/pkg/test/ginkgo/result"
exutil "github.com/openshift/origin/test/extended/util"
authorizationv1 "k8s.io/api/authorization/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
)

const (
	// nodesGroup is the RBAC group every kubelet belongs to.
	nodesGroup = "system:nodes"
	// nodeNamePrefix prefixes a node name to form its API username (system:node:<name>).
	nodeNamePrefix = "system:node:"
	// desiredTestDuration bounds how long we wait for a kube-apiserver rollout.
	desiredTestDuration = 25 * time.Minute
)

// Admission tests for the MinimumKubeletVersion feature: verify which values
// the config/v1 Nodes "cluster" object accepts for spec.minimumKubeletVersion.
var _ = g.Describe("[sig-node][OCPFeatureGate:MinimumKubeletVersion] admission", func() {
	defer g.GinkgoRecover()

	oc := exutil.NewCLIWithoutNamespace("minimum-kubelet-version")

	g.DescribeTable("admission", func(version string, expectedErr bool) {
		// Apply the version now and defer the returned undo so the cluster
		// config is restored when this entry finishes. The previous code
		// deferred the update call itself and discarded the undo func, which
		// left the minimum kubelet version set after a successful entry.
		undo := updateMinimumKubeletVersion(oc, version, expectedErr)
		defer undo()
	},
		g.Entry("should allow an empty minimum kubelet version", "", false),
		g.Entry("should allow an old minimum kubelet version", "1.30.0", false),
		// A version newer than any running kubelet must be rejected by admission.
		g.Entry("should not allow with a new minimum kubelet version", "1.100.0", true),
	)
})

// minimumKubeletVersionAuthTestCase describes one node-authorizer scenario:
// a fake node reporting kubeletVersion is created, then testFunc performs an
// API call while impersonating that node and asserts whether it is allowed.
type minimumKubeletVersionAuthTestCase struct {
	testName       string // human-readable description, logged before the case runs
	kubeletVersion string // KubeletVersion reported in the fake node's status
	testFunc       func() // the impersonated API call plus its expectation
}

// Authorization tests for MinimumKubeletVersion: with the cluster minimum set
// to 1.30.0, a client impersonating a node is allowed or denied API calls
// depending on the kubelet version the node reports. Marked [Serial] because
// changing the config rolls out a new kube-apiserver revision.
var _ = g.Describe("[sig-node][OCPFeatureGate:MinimumKubeletVersion] [Serial]", func() {
	defer g.GinkgoRecover()

	var (
		nodeImpersonatingClient clientset.Interface
		nodeName                = "fakenode"
		asUser                  = nodeNamePrefix + nodeName // "system:node:fakenode"
		minimumVersion          = "1.30.0"
		f                       = framework.NewDefaultFramework("minimum-kubelet-version")
		oc                      = exutil.NewCLIWithoutNamespace("minimum-kubelet-version")
	)

	g.BeforeEach(func() {
		// Set the cluster minimum kubelet version and wait for the resulting
		// kube-apiserver rollout; the returned func (registered as cleanup)
		// reverts the config and waits for the revert rollout afterwards.
		g.DeferCleanup(updateMinimumKubeletVersionAndWait(oc, minimumVersion))

		ginkgo.By("Creating a kubernetes client that impersonates a node")
		config, err := framework.LoadConfig()
		framework.ExpectNoError(err, "failed to load kubernetes client config")
		config.Impersonate = restclient.ImpersonationConfig{
			UserName: asUser,
			Groups:   []string{nodesGroup},
		}
		nodeImpersonatingClient, err = clientset.NewForConfig(config)
		framework.ExpectNoError(err, "failed to create Clientset for the given config: %+v", *config)
	})

	g.It("authorization", func() {
		testCases := []minimumKubeletVersionAuthTestCase{
			{
				testName:       "should be able to list pods if new enough",
				kubeletVersion: "v1.30.0", // meets the 1.30.0 minimum
				testFunc: func() {
					_, err := nodeImpersonatingClient.CoreV1().Pods(f.Namespace.Name).List(context.Background(), metav1.ListOptions{})
					o.Expect(err).NotTo(o.HaveOccurred())
				},
			},
			{
				testName:       "should be able to get node",
				kubeletVersion: "v1.29.0", // below minimum; this call is expected to succeed anyway
				testFunc: func() {
					_, err := nodeImpersonatingClient.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
					o.Expect(err).NotTo(o.HaveOccurred())
				},
			},
			{
				testName:       "should be able to perform subjectaccessreviews",
				kubeletVersion: "v1.29.0", // below minimum; SAR creation is expected to succeed anyway
				testFunc: func() {
					sar := &authorizationv1.SubjectAccessReview{
						Spec: authorizationv1.SubjectAccessReviewSpec{
							ResourceAttributes: &authorizationv1.ResourceAttributes{
								Verb:      "list",
								Resource:  "configmaps",
								Namespace: f.Namespace.Name,
								Version:   "v1",
							},
							User:   asUser,
							Groups: []string{nodesGroup},
						},
					}

					_, err := nodeImpersonatingClient.AuthorizationV1().SubjectAccessReviews().Create(context.Background(), sar, metav1.CreateOptions{})
					o.Expect(err).NotTo(o.HaveOccurred())
				},
			},
			{
				testName:       "should block node from listing pods if too old",
				kubeletVersion: "v1.29.0", // below minimum; the list call must be rejected
				testFunc: func() {
					_, err := nodeImpersonatingClient.CoreV1().Pods(f.Namespace.Name).List(context.Background(), metav1.ListOptions{})
					o.Expect(err).To(o.HaveOccurred())
				},
			},
		}
		// Do this sequentially instead of with a fancier mechanism like g.DescribeTable so we don't
		// need to fuss with BeforeSuite/AfterSuite business with rolling out a new apiserver
		for _, tc := range testCases {
			runMinimumKubeletVersionAuthTest(&tc, nodeName, asUser, nodeImpersonatingClient, f)
		}
	})
})

// runMinimumKubeletVersionAuthTest creates a fake node reporting the test
// case's kubelet version (while impersonating that node), runs the case's
// check, and always deletes the node afterwards. It is a standalone function
// so the deferred deletion fires at the end of each individual case.
func runMinimumKubeletVersionAuthTest(tc *minimumKubeletVersionAuthTestCase, nodeName, asUser string, nodeImpersonatingClient clientset.Interface, f *framework.Framework) {
	framework.Logf("authorization %s", tc.testName)

	fakeNode := &v1.Node{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Node",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{Name: nodeName},
		Status: v1.NodeStatus{
			NodeInfo: v1.NodeSystemInfo{
				KubeletVersion: tc.kubeletVersion,
			},
		},
	}

	ginkgo.By(fmt.Sprintf("Create node %s by user: %v", nodeName, asUser))
	_, createErr := nodeImpersonatingClient.CoreV1().Nodes().Create(context.Background(), fakeNode, metav1.CreateOptions{})
	o.Expect(createErr).NotTo(o.HaveOccurred())

	// Deferred so the node is removed even if the test func fails; a
	// NotFound on delete is tolerated.
	defer func() {
		delErr := f.ClientSet.CoreV1().Nodes().Delete(context.Background(), fakeNode.Name, metav1.DeleteOptions{})
		if delErr != nil && !apierrors.IsNotFound(delErr) {
			o.Expect(delErr).NotTo(o.HaveOccurred())
		}
	}()

	tc.testFunc()
}

// updateMinimumKubeletVersionAndWait sets spec.minimumKubeletVersion to
// version, waits for the kube-apiserver rollout that the change triggers, and
// returns a func that reverts the config and waits for the revert rollout.
func updateMinimumKubeletVersionAndWait(oc *exutil.CLI, version string) func() {
	ginkgo.By("Updating minimum kubelet version to " + version)
	operatorClient := oc.AdminOperatorClient()

	// Snapshot the revision before the change so we can detect the rollout.
	before, err := operatorClient.OperatorV1().KubeAPIServers().Get(context.Background(), "cluster", metav1.GetOptions{})
	framework.ExpectNoError(err)

	revert := updateMinimumKubeletVersion(oc, version, false)

	// Block until a revision newer than the snapshot has fully rolled out.
	waitForAPIServerRollout(before.Status.LatestAvailableRevision, operatorClient)

	return func() {
		ginkgo.By("Reverting minimum kubelet version to \"\"")
		current, getErr := operatorClient.OperatorV1().KubeAPIServers().Get(context.Background(), "cluster", metav1.GetOptions{})
		framework.ExpectNoError(getErr)

		revert()

		waitForAPIServerRollout(current.Status.LatestAvailableRevision, operatorClient)
	}
}

// updateMinimumKubeletVersion sets spec.minimumKubeletVersion on the cluster
// Nodes config to version, asserting that the update succeeds (or, when
// expectedErr is true, that admission rejects it). It returns a func that
// restores the spec captured before the change.
func updateMinimumKubeletVersion(oc *exutil.CLI, version string, expectedErr bool) func() {
	original, getErr := oc.AdminConfigClient().ConfigV1().Nodes().Get(context.Background(), "cluster", metav1.GetOptions{})
	o.Expect(getErr).NotTo(o.HaveOccurred())

	desired := original.DeepCopy()
	desired.Spec.MinimumKubeletVersion = version
	_, updateErr := oc.AdminConfigClient().ConfigV1().Nodes().Update(context.Background(), desired, metav1.UpdateOptions{})
	if expectedErr {
		o.Expect(updateErr).To(o.HaveOccurred())
	} else {
		o.Expect(updateErr).NotTo(o.HaveOccurred())
	}

	return func() {
		// Re-fetch to get a fresh resourceVersion, then restore the original spec.
		latest, err := oc.AdminConfigClient().ConfigV1().Nodes().Get(context.Background(), "cluster", metav1.GetOptions{})
		o.Expect(err).NotTo(o.HaveOccurred())

		latest.Spec = *original.Spec.DeepCopy()

		_, err = oc.AdminConfigClient().ConfigV1().Nodes().Update(context.Background(), latest, metav1.UpdateOptions{})
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}

// waitForAPIServerRollout polls the kube-apiserver operator until a revision
// newer than previousLatestRevision is available and every node in the
// operator's status has converged to it, or until the desiredTestDuration
// budget expires. Read errors are accumulated instead of failing immediately:
// "http2: client connection lost" is tracked as a flake, everything else as a
// hard error reported after the loop.
func waitForAPIServerRollout(previousLatestRevision int32, operatorClient operatorv1client.Interface) {
	ctx := context.Background()
	// separate context so we exit our loop, but it is still possible to use the main context for client calls
	shouldEndTestCtx, shouldEndCancelFn := context.WithTimeout(ctx, desiredTestDuration)
	defer shouldEndCancelFn()

	errs := []error{}
	flakes := []error{}
	// ensure the kube-apiserver operator is stable
	nextLogTime := time.Now().Add(time.Minute)
	for {
		// prevent hot loops, the extra delay doesn't really matter
		time.Sleep(10 * time.Second)
		if shouldEndTestCtx.Err() != nil {
			// time budget exhausted; fall through and report accumulated errors
			break
		}

		// this may actually be flaky if the kube-apiserver is rolling out badly. Keep track of failures so we can
		// fail the run, but don't exit the test here.
		kasStatus, err := operatorClient.OperatorV1().KubeAPIServers().Get(ctx, "cluster", metav1.GetOptions{})
		if err != nil {
			reportedErr := fmt.Errorf("failed reading clusteroperator, time=%v, err=%w", time.Now(), err)
			if strings.Contains(err.Error(), "http2: client connection lost") {
				// known-transient network blip: record as a flake, not a failure
				flakes = append(flakes, reportedErr)
				continue
			}
			errs = append(errs, reportedErr)
			continue
		}

		// check to see that every node is at the latest revision
		latestRevision := kasStatus.Status.LatestAvailableRevision
		if latestRevision <= previousLatestRevision {
			framework.Logf("kube-apiserver still has not observed rollout: previousLatestRevision=%d, latestRevision=%d", previousLatestRevision, latestRevision)
			continue
		}

		nodeNotAtRevisionReasons := []string{}
		for _, nodeStatus := range kasStatus.Status.NodeStatuses {
			if nodeStatus.CurrentRevision != latestRevision {
				nodeNotAtRevisionReasons = append(nodeNotAtRevisionReasons, fmt.Sprintf("node/%v is at revision %d, not %d", nodeStatus.NodeName, nodeStatus.CurrentRevision, latestRevision))
			}
		}
		if len(nodeNotAtRevisionReasons) == 0 {
			// rollout complete: new revision observed and all nodes converged
			break
		}
		// rate-limit progress logging to roughly once per minute
		if time.Now().After(nextLogTime) {
			framework.Logf("kube-apiserver still not stable after rollout: %v", strings.Join(nodeNotAtRevisionReasons, "; "))
			nextLogTime = time.Now().Add(time.Minute)
		}
	}

	if len(errs) > 0 {
		framework.ExpectNoError(errors.Join(errs...))
	}
	if len(flakes) > 0 {
		result.Flakef("errors that will eventually be failures: %v", errors.Join(flakes...))
	}
}
14 changes: 14 additions & 0 deletions test/extended/util/annotate/generated/zz_generated.annotations.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

21 changes: 19 additions & 2 deletions vendor/github.com/openshift/api/config/v1/types_cluster_version.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading