-
Notifications
You must be signed in to change notification settings - Fork 4.7k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Signed-off-by: Peter Hunt <[email protected]>
- Loading branch information
1 parent
34e3b67
commit b370f00
Showing
4 changed files
with
297 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,266 @@ | ||
package node | ||
|
||
import ( | ||
"context" | ||
"errors" | ||
"fmt" | ||
"strings" | ||
"time" | ||
|
||
"github.com/onsi/ginkgo/v2" | ||
g "github.com/onsi/ginkgo/v2" | ||
o "github.com/onsi/gomega" | ||
operatorv1client "github.com/openshift/client-go/operator/clientset/versioned" | ||
"github.com/openshift/origin/pkg/test/ginkgo/result" | ||
exutil "github.com/openshift/origin/test/extended/util" | ||
authorizationv1 "k8s.io/api/authorization/v1" | ||
v1 "k8s.io/api/core/v1" | ||
apierrors "k8s.io/apimachinery/pkg/api/errors" | ||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
clientset "k8s.io/client-go/kubernetes" | ||
restclient "k8s.io/client-go/rest" | ||
"k8s.io/kubernetes/test/e2e/framework" | ||
) | ||
|
||
const (
	// nodesGroup is the group shared by all kubelet identities.
	nodesGroup = "system:nodes"
	// nodeNamePrefix is prepended to a node name to form the kubelet's username.
	nodeNamePrefix = "system:node:"
	// desiredTestDuration bounds how long waitForAPIServerRollout polls for a rollout.
	desiredTestDuration = 25 * time.Minute
)
|
||
var _ = g.Describe("[sig-node][OCPFeatureGate:MinimumKubeletVersion] admission", func() { | ||
defer g.GinkgoRecover() | ||
|
||
oc := exutil.NewCLIWithoutNamespace("minimum-kubelet-version") | ||
|
||
g.DescribeTable("admission", func(version string, expectedErr bool) { | ||
defer updateMinimumKubeletVersion(oc, version, expectedErr) | ||
}, | ||
g.Entry("should allow an empty minimum kubelet version", "", false), | ||
g.Entry("should allow an old minimum kubelet version", "1.30.0", false), | ||
g.Entry("should not allow with a new minimum kubelet version", "1.100.0", true), | ||
g.Entry("should not allow with a new minimum kubelet version", "1.100.0", true), | ||
) | ||
}) | ||
|
||
// minimumKubeletVersionAuthTestCase describes one authorization scenario: a
// fake node reporting kubeletVersion is created, then testFunc issues an API
// call while impersonating that node and asserts whether it was allowed.
type minimumKubeletVersionAuthTestCase struct {
	// testName is logged to identify the case (cases run sequentially in one spec).
	testName string
	// kubeletVersion is the version the fake node reports in its status.
	kubeletVersion string
	// testFunc performs the impersonated API call and its gomega assertions.
	testFunc func()
}
|
||
// Serial authorization tests for MinimumKubeletVersion: with minimumVersion
// configured on the cluster, a client impersonating a node is allowed or
// blocked depending on the kubelet version its (fake) node object reports.
var _ = g.Describe("[sig-node][OCPFeatureGate:MinimumKubeletVersion] [Serial]", func() {
	defer g.GinkgoRecover()

	var (
		nodeImpersonatingClient clientset.Interface
		// nodeName never matches a real kubelet; the node object is created
		// and deleted per test case by runMinimumKubeletVersionAuthTest.
		nodeName = "fakenode"
		// asUser is the kubelet-style username presented when impersonating
		// the fake node (system:node:fakenode).
		asUser         = nodeNamePrefix + nodeName
		minimumVersion = "1.30.0"
		f              = framework.NewDefaultFramework("minimum-kubelet-version")
		oc             = exutil.NewCLIWithoutNamespace("minimum-kubelet-version")
	)

	g.BeforeEach(func() {
		// Set the minimum kubelet version and wait for the kube-apiserver to
		// roll out; the returned undo func (registered as cleanup) reverts the
		// config and waits for the revert rollout after the spec.
		g.DeferCleanup(updateMinimumKubeletVersionAndWait(oc, minimumVersion))

		ginkgo.By("Creating a kubernetes client that impersonates a node")
		config, err := framework.LoadConfig()
		framework.ExpectNoError(err, "failed to load kubernetes client config")
		config.Impersonate = restclient.ImpersonationConfig{
			UserName: asUser,
			Groups:   []string{nodesGroup},
		}
		nodeImpersonatingClient, err = clientset.NewForConfig(config)
		framework.ExpectNoError(err, "failed to create Clientset for the given config: %+v", *config)
	})

	g.It("authorization", func() {
		testCases := []minimumKubeletVersionAuthTestCase{
			{
				// Node version meets the minimum, so listing pods is permitted.
				testName:       "should be able to list pods if new enough",
				kubeletVersion: "v1.30.0",
				testFunc: func() {
					_, err := nodeImpersonatingClient.CoreV1().Pods(f.Namespace.Name).List(context.Background(), metav1.ListOptions{}) //nolint:staticcheck
					o.Expect(err).NotTo(o.HaveOccurred())
				},
			},
			{
				// Even below the minimum, a node may still fetch its own node object.
				testName:       "should be able to get node",
				kubeletVersion: "v1.29.0",
				testFunc: func() {
					_, err := nodeImpersonatingClient.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
					o.Expect(err).NotTo(o.HaveOccurred())
				},
			},
			{
				// SubjectAccessReviews also remain allowed below the minimum.
				testName:       "should be able to perform subjectaccessreviews",
				kubeletVersion: "v1.29.0",
				testFunc: func() {
					sar := &authorizationv1.SubjectAccessReview{
						Spec: authorizationv1.SubjectAccessReviewSpec{
							ResourceAttributes: &authorizationv1.ResourceAttributes{
								Verb:      "list",
								Resource:  "configmaps",
								Namespace: f.Namespace.Name,
								Version:   "v1",
							},
							User:   asUser,
							Groups: []string{nodesGroup},
						},
					}

					_, err := nodeImpersonatingClient.AuthorizationV1().SubjectAccessReviews().Create(context.Background(), sar, metav1.CreateOptions{})
					o.Expect(err).NotTo(o.HaveOccurred())
				},
			},
			{
				// Below the minimum, broader access like listing pods is denied.
				testName:       "should block node from listing pods if too old",
				kubeletVersion: "v1.29.0",
				testFunc: func() {
					_, err := nodeImpersonatingClient.CoreV1().Pods(f.Namespace.Name).List(context.Background(), metav1.ListOptions{})
					o.Expect(err).To(o.HaveOccurred())
				},
			},
		}
		// Do this sequentially instead of with a fancier mechanism like g.DescribeTable so we don't
		// need to fuss with BeforeSuite/AfterSuite business with rolling out a new apiserver
		for _, tc := range testCases {
			runMinimumKubeletVersionAuthTest(&tc, nodeName, asUser, nodeImpersonatingClient, f)
		}
	})
})
|
||
// runMinimumKubeletVersionAuthTest runs a test. It's done in a separate function to make cleaning up the created node less messy. | ||
func runMinimumKubeletVersionAuthTest(tc *minimumKubeletVersionAuthTestCase, nodeName, asUser string, nodeImpersonatingClient clientset.Interface, f *framework.Framework) { | ||
framework.Logf("authorization %s", tc.testName) | ||
node := &v1.Node{ | ||
ObjectMeta: metav1.ObjectMeta{Name: nodeName}, | ||
TypeMeta: metav1.TypeMeta{ | ||
Kind: "Node", | ||
APIVersion: "v1", | ||
}, | ||
Status: v1.NodeStatus{ | ||
NodeInfo: v1.NodeSystemInfo{ | ||
KubeletVersion: tc.kubeletVersion, | ||
}, | ||
}, | ||
} | ||
ginkgo.By(fmt.Sprintf("Create node %s by user: %v", nodeName, asUser)) | ||
_, err := nodeImpersonatingClient.CoreV1().Nodes().Create(context.Background(), node, metav1.CreateOptions{}) | ||
o.Expect(err).NotTo(o.HaveOccurred()) | ||
|
||
// Make a new scope so we are sure to cleanup even if the test function fails | ||
defer func() { | ||
if err := f.ClientSet.CoreV1().Nodes().Delete(context.Background(), node.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { | ||
o.Expect(err).NotTo(o.HaveOccurred()) | ||
} | ||
}() | ||
|
||
tc.testFunc() | ||
} | ||
|
||
func updateMinimumKubeletVersionAndWait(oc *exutil.CLI, version string) func() { | ||
ginkgo.By("Updating minimum kubelet version to " + version) | ||
operatorClient := oc.AdminOperatorClient() | ||
|
||
kasStatus, err := operatorClient.OperatorV1().KubeAPIServers().Get(context.Background(), "cluster", metav1.GetOptions{}) | ||
framework.ExpectNoError(err) | ||
|
||
undoFunc := updateMinimumKubeletVersion(oc, version, false) | ||
|
||
// and wait for it to rollout | ||
waitForAPIServerRollout(kasStatus.Status.LatestAvailableRevision, operatorClient) | ||
return func() { | ||
ginkgo.By("Reverting minimum kubelet version to \"\"") | ||
newKasStatus, err := operatorClient.OperatorV1().KubeAPIServers().Get(context.Background(), "cluster", metav1.GetOptions{}) | ||
framework.ExpectNoError(err) | ||
|
||
undoFunc() | ||
|
||
waitForAPIServerRollout(newKasStatus.Status.LatestAvailableRevision, operatorClient) | ||
} | ||
} | ||
|
||
func updateMinimumKubeletVersion(oc *exutil.CLI, version string, expectedErr bool) func() { | ||
nodesConfigOrig, err := oc.AdminConfigClient().ConfigV1().Nodes().Get(context.Background(), "cluster", metav1.GetOptions{}) | ||
o.Expect(err).NotTo(o.HaveOccurred()) | ||
|
||
nodesConfig := nodesConfigOrig.DeepCopy() | ||
nodesConfig.Spec.MinimumKubeletVersion = version | ||
_, err = oc.AdminConfigClient().ConfigV1().Nodes().Update(context.Background(), nodesConfig, metav1.UpdateOptions{}) | ||
if expectedErr { | ||
o.Expect(err).To(o.HaveOccurred()) | ||
} else { | ||
o.Expect(err).NotTo(o.HaveOccurred()) | ||
} | ||
return func() { | ||
nodesConfigCurrent, err := oc.AdminConfigClient().ConfigV1().Nodes().Get(context.Background(), "cluster", metav1.GetOptions{}) | ||
o.Expect(err).NotTo(o.HaveOccurred()) | ||
|
||
nodesConfigCurrent.Spec = *nodesConfigOrig.Spec.DeepCopy() | ||
|
||
_, err = oc.AdminConfigClient().ConfigV1().Nodes().Update(context.Background(), nodesConfigCurrent, metav1.UpdateOptions{}) | ||
o.Expect(err).NotTo(o.HaveOccurred()) | ||
} | ||
} | ||
|
||
// waitForAPIServerRollout polls the kube-apiserver operator until a revision
// newer than previousLatestRevision exists AND every node reports that
// revision as current, i.e. the rollout has completed cluster-wide. Read
// failures are accumulated rather than aborting: known-transient connection
// drops are reported as flakes, anything else fails the run after polling
// ends. Polling is bounded by desiredTestDuration.
func waitForAPIServerRollout(previousLatestRevision int32, operatorClient operatorv1client.Interface) {
	ctx := context.Background()
	// separate context so we exit our loop, but it is still possible to use the main context for client calls
	shouldEndTestCtx, shouldEndCancelFn := context.WithTimeout(ctx, desiredTestDuration)
	defer shouldEndCancelFn()

	errs := []error{}
	flakes := []error{}
	// ensure the kube-apiserver operator is stable
	nextLogTime := time.Now().Add(time.Minute)
	for {
		// prevent hot loops, the extra delay doesn't really matter
		time.Sleep(10 * time.Second)
		if shouldEndTestCtx.Err() != nil {
			// overall deadline hit; fall through to error/flake reporting below
			break
		}

		// this may actually be flaky if the kube-apiserver is rolling out badly. Keep track of failures so we can
		// fail the run, but don't exit the test here.
		kasStatus, err := operatorClient.OperatorV1().KubeAPIServers().Get(ctx, "cluster", metav1.GetOptions{})
		if err != nil {
			reportedErr := fmt.Errorf("failed reading clusteroperator, time=%v, err=%w", time.Now(), err)
			if strings.Contains(err.Error(), "http2: client connection lost") {
				// known transient network blip: count as a flake, not a failure
				flakes = append(flakes, reportedErr)
				continue
			}
			errs = append(errs, reportedErr)
			continue
		}

		// check to see that every node is at the latest revision
		latestRevision := kasStatus.Status.LatestAvailableRevision
		if latestRevision <= previousLatestRevision {
			// the operator has not produced a new revision yet; keep waiting
			framework.Logf("kube-apiserver still has not observed rollout: previousLatestRevision=%d, latestRevision=%d", previousLatestRevision, latestRevision)
			continue
		}

		nodeNotAtRevisionReasons := []string{}
		for _, nodeStatus := range kasStatus.Status.NodeStatuses {
			if nodeStatus.CurrentRevision != latestRevision {
				nodeNotAtRevisionReasons = append(nodeNotAtRevisionReasons, fmt.Sprintf("node/%v is at revision %d, not %d", nodeStatus.NodeName, nodeStatus.CurrentRevision, latestRevision))
			}
		}
		if len(nodeNotAtRevisionReasons) == 0 {
			// rollout complete: every node is on the latest revision
			break
		}
		// rate-limit "still waiting" logs to once per minute
		if time.Now().After(nextLogTime) {
			framework.Logf("kube-apiserver still not stable after rollout: %v", strings.Join(nodeNotAtRevisionReasons, "; "))
			nextLogTime = time.Now().Add(time.Minute)
		}
	}

	if len(errs) > 0 {
		framework.ExpectNoError(errors.Join(errs...))
	}
	if len(flakes) > 0 {
		// flakes are surfaced to the result aggregator but don't fail the run here
		result.Flakef("errors that will eventually be failures: %v", errors.Join(flakes...))
	}
}
14 changes: 14 additions & 0 deletions
14
test/extended/util/annotate/generated/zz_generated.annotations.go
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters