SDN-5017: Add UDN Network Policy e2e tests #29195
@@ -132,19 +132,35 @@ func getClusterNetwork(c operatorclientv1.NetworkInterface) *operatorv1.Network
}

func podShouldReach(oc *exutil.CLI, podName, address string) {
	namespacePodShouldReach(oc, "", podName, address)
}

func namespacePodShouldReach(oc *exutil.CLI, namespace, podName, address string) {
	out := ""
	o.EventuallyWithOffset(1, func() error {
		var err error
		if namespace == "" {
			out, err = oc.AsAdmin().Run("exec").Args(podName, "--", "curl", "--connect-timeout", "1", address).Output()
		} else {
			out, err = oc.AsAdmin().Run("exec").Args(podName, "-n", namespace, "--", "curl", "--connect-timeout", "1", address).Output()
		}
		return err
	}, "30s", "1s").ShouldNot(o.HaveOccurred(), "cmd output: %s", out)
}

func podShouldNotReach(oc *exutil.CLI, podName, address string) {
	namespacePodShouldNotReach(oc, "", podName, address)
}

func namespacePodShouldNotReach(oc *exutil.CLI, namespace, podName, address string) {
	out := ""
	o.EventuallyWithOffset(1, func() error {
		var err error
		if namespace == "" {
			out, err = oc.AsAdmin().Run("exec").Args(podName, "--", "curl", "--connect-timeout", "1", address).Output()
		} else {
			out, err = oc.AsAdmin().Run("exec").Args(podName, "-n", namespace, "--", "curl", "--connect-timeout", "1", address).Output()
		}
		return err
	}, "30s", "1s").Should(o.HaveOccurred(), "cmd output: %s", out)
}
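These helpers poll with Gomega's EventuallyWithOffset, retrying every second for up to 30 seconds; the offset of 1 makes a failure report point at the test that called the helper rather than at the helper itself. An empty namespace leaves `oc exec` in the CLI's current (test) namespace, while a non-empty one adds `-n`. A minimal standalone sketch of the same retry pattern, assuming the suite's `o` alias for gomega plus "net" and "time" imports, and probing with a hypothetical raw TCP dial instead of curl:

	// Sketch only: mirrors the Eventually/offset pattern of the helpers above.
	func eventuallyDials(address string) {
		o.EventuallyWithOffset(1, func() error {
			conn, err := net.DialTimeout("tcp", address, time.Second) // hypothetical probe
			if err == nil {
				conn.Close()
			}
			return err
		}, "30s", "1s").ShouldNot(o.HaveOccurred())
	}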
@@ -0,0 +1,333 @@
package networking

import (
	"context"
	"fmt"
	"net"
	"strings"

	nadclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	exutil "github.com/openshift/origin/test/extended/util"

	v1 "k8s.io/api/core/v1"
	knet "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/rand"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	admissionapi "k8s.io/pod-security-admission/api"
)

var _ = ginkgo.Describe("[sig-network][OCPFeatureGate:NetworkSegmentation][Feature:UserDefinedPrimaryNetworks] Network Policies", func() {
	defer ginkgo.GinkgoRecover()

	oc := exutil.NewCLIWithPodSecurityLevel("network-segmentation-policy-e2e", admissionapi.LevelPrivileged)
	f := oc.KubeFramework()
	InOVNKubernetesContext(func() {
		const (
			nodeHostnameKey              = "kubernetes.io/hostname"
			nadName                      = "tenant-red"
			userDefinedNetworkIPv4Subnet = "203.203.0.0/16"
			userDefinedNetworkIPv6Subnet = "2014:100:200::0/60"
			port                         = 9000
			netPrefixLengthPerNode       = 24
			randomStringLength           = 5
			nameSpaceYellowSuffix        = "yellow"
			namespaceBlueSuffix          = "blue"
		)
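		// Note: the IPv4/IPv6 subnets above appear to be the cluster-wide CIDRs of the
		// user-defined network; for the layer3 topology the assertions below check pod
		// IPs against a per-node /24 (netPrefixLengthPerNode) slice of the IPv4 range.
		// (This reading is inferred from the assertions, not stated in the PR.)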
		var (
			cs                  clientset.Interface
			nadClient           nadclient.K8sCniCncfIoV1Interface
			allowServerPodLabel = map[string]string{"foo": "bar"}
			denyServerPodLabel  = map[string]string{"abc": "xyz"}
		)

		ginkgo.BeforeEach(func() {
			cs = f.ClientSet

			var err error
			nadClient, err = nadclient.NewForConfig(f.ClientConfig())
			gomega.Expect(err).NotTo(gomega.HaveOccurred())

			namespaceYellow := getNamespaceName(f, nameSpaceYellowSuffix)
			namespaceBlue := getNamespaceName(f, namespaceBlueSuffix)
			for _, namespace := range []string{namespaceYellow, namespaceBlue} {
				ginkgo.By("Creating namespace " + namespace)
				ns, err := cs.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name: namespace,
					},
				}, metav1.CreateOptions{})
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				f.AddNamespacesToDelete(ns)
			}
		})

		ginkgo.AfterEach(func() {
			if ginkgo.CurrentSpecReport().Failed() {
				exutil.DumpPodStatesInNamespace(f.Namespace.Name, oc)
				exutil.DumpPodStatesInNamespace(getNamespaceName(f, nameSpaceYellowSuffix), oc)
				exutil.DumpPodStatesInNamespace(getNamespaceName(f, namespaceBlueSuffix), oc)
			}
		})

		ginkgo.DescribeTable(
			"pods within namespace should be isolated when deny policy is present",
			func(
				topology string,
				clientPodConfig podConfiguration,
				serverPodConfig podConfiguration,
			) {
				ginkgo.By("Creating the attachment configuration")
				netConfig := newNetworkAttachmentConfig(networkAttachmentConfigParams{
					name:     nadName,
					topology: topology,
					cidr:     correctCIDRFamily(oc, userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet),
					role:     "primary",
				})
				netConfig.namespace = f.Namespace.Name
				_, err := nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create(
					context.Background(),
					generateNAD(netConfig),
					metav1.CreateOptions{},
				)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())

				workerNodes, err := getWorkerNodesOrdered(cs)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				gomega.Expect(len(workerNodes)).To(gomega.BeNumerically(">=", 1))

				ginkgo.By("creating client/server pods")
				clientPodConfig.namespace = f.Namespace.Name
				clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: workerNodes[0].Name}
				serverPodConfig.namespace = f.Namespace.Name
				serverPodConfig.nodeSelector = map[string]string{nodeHostnameKey: workerNodes[len(workerNodes)-1].Name}
Review comment: OK, so in the case of a single-node cluster, this will schedule the server pod on the only available node.

Reply: Yes, right. I guess that should be fine.
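				// The client pod is pinned to the first worker and the server pod(s) to the
				// last, so traffic crosses nodes whenever more than one worker exists; on a
				// single-node cluster both land on the same node (see the review note above).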
				runUDNPod(cs, f.Namespace.Name, serverPodConfig, nil)
				runUDNPod(cs, f.Namespace.Name, clientPodConfig, nil)

				var serverIP string
				for i, cidr := range strings.Split(netConfig.cidr, ",") {
					if cidr != "" {
						ginkgo.By("asserting the server pod has an IP from the configured range")
						serverIP, err = podIPsForUserDefinedPrimaryNetwork(
							cs,
							f.Namespace.Name,
							serverPodConfig.name,
							namespacedName(f.Namespace.Name, netConfig.name),
							i,
						)
						gomega.Expect(err).NotTo(gomega.HaveOccurred())
						const netPrefixLengthPerNode = 24
						ginkgo.By(fmt.Sprintf("asserting the server pod IP %v is from the configured range %v/%v", serverIP, cidr, netPrefixLengthPerNode))
						subnet, err := getNetCIDRSubnet(cidr)
						gomega.Expect(err).NotTo(gomega.HaveOccurred())
						gomega.Expect(inRange(subnet, serverIP)).To(gomega.Succeed())
					}

					ginkgo.By("asserting the *client* pod can contact the server pod exposed endpoint")
					podShouldReach(oc, clientPodConfig.name, formatHostAndPort(net.ParseIP(serverIP), port))
				}

				ginkgo.By("creating a \"default deny\" network policy")
				_, err = makeDenyAllPolicy(f, f.Namespace.Name)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())

				ginkgo.By("asserting the *client* pod can not contact the server pod exposed endpoint")
				podShouldNotReach(oc, clientPodConfig.name, formatHostAndPort(net.ParseIP(serverIP), port))
			},
			ginkgo.Entry(
				"in L2 dualstack primary UDN",
				"layer2",
				*podConfig(
					"client-pod",
				),
				*podConfig(
					"server-pod",
					withCommand(func() []string {
						return httpServerContainerCmd(port)
					}),
				),
			),
			ginkgo.Entry(
				"in L3 dualstack primary UDN",
				"layer3",
				*podConfig(
					"client-pod",
				),
				*podConfig(
					"server-pod",
					withCommand(func() []string {
						return httpServerContainerCmd(port)
					}),
				),
			),
		)

		ginkgo.DescribeTable(
			"allow ingress traffic to one pod from a particular namespace",
			func(
				topology string,
				clientPodConfig podConfiguration,
				allowServerPodConfig podConfiguration,
				denyServerPodConfig podConfiguration,
			) {

				namespaceYellow := getNamespaceName(f, nameSpaceYellowSuffix)
				namespaceBlue := getNamespaceName(f, namespaceBlueSuffix)

				nad := networkAttachmentConfigParams{
					topology: topology,
					cidr:     correctCIDRFamily(oc, userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet),
					// Both the yellow and blue namespaces are going to be served by the green network.
					// Use a random suffix for the network name to avoid races between tests.
					networkName: fmt.Sprintf("%s-%s", "green", rand.String(randomStringLength)),
					role:        "primary",
				}

				// Use a random suffix in the net conf name to avoid races between tests.
				netConfName := fmt.Sprintf("sharednet-%s", rand.String(randomStringLength))
				for _, namespace := range []string{namespaceYellow, namespaceBlue} {
					ginkgo.By("creating the attachment configuration for " + netConfName + " in namespace " + namespace)
					netConfig := newNetworkAttachmentConfig(nad)
					netConfig.namespace = namespace
					netConfig.name = netConfName

					_, err := nadClient.NetworkAttachmentDefinitions(namespace).Create(
						context.Background(),
						generateNAD(netConfig),
						metav1.CreateOptions{},
					)
					gomega.Expect(err).NotTo(gomega.HaveOccurred())
				}

				workerNodes, err := getWorkerNodesOrdered(cs)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				gomega.Expect(len(workerNodes)).To(gomega.BeNumerically(">=", 1))

				ginkgo.By("creating client/server pods")
				allowServerPodConfig.namespace = namespaceYellow
				allowServerPodConfig.nodeSelector = map[string]string{nodeHostnameKey: workerNodes[len(workerNodes)-1].Name}
				denyServerPodConfig.namespace = namespaceYellow
				denyServerPodConfig.nodeSelector = map[string]string{nodeHostnameKey: workerNodes[len(workerNodes)-1].Name}
				clientPodConfig.namespace = namespaceBlue
				clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: workerNodes[0].Name}
				runUDNPod(cs, namespaceYellow, allowServerPodConfig, func(pod *v1.Pod) {
					setRuntimeDefaultPSA(pod)
				})
				runUDNPod(cs, namespaceYellow, denyServerPodConfig, func(pod *v1.Pod) {
					setRuntimeDefaultPSA(pod)
				})
				runUDNPod(cs, namespaceBlue, clientPodConfig, func(pod *v1.Pod) {
					setRuntimeDefaultPSA(pod)
				})

				ginkgo.By("asserting the server pods have an IP from the configured range")
				allowServerPodIP, err := podIPsForUserDefinedPrimaryNetwork(cs, namespaceYellow, allowServerPodConfig.name,
					namespacedName(namespaceYellow, netConfName), 0)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				ginkgo.By(fmt.Sprintf("asserting the allow server pod IP %v is from the configured range %v/%v", allowServerPodIP,
					userDefinedNetworkIPv4Subnet, netPrefixLengthPerNode))
				subnet, err := getNetCIDRSubnet(userDefinedNetworkIPv4Subnet)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				gomega.Expect(inRange(subnet, allowServerPodIP)).To(gomega.Succeed())
				denyServerPodIP, err := podIPsForUserDefinedPrimaryNetwork(cs, namespaceYellow, denyServerPodConfig.name,
					namespacedName(namespaceYellow, netConfName), 0)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				ginkgo.By(fmt.Sprintf("asserting the deny server pod IP %v is from the configured range %v/%v", denyServerPodIP,
					userDefinedNetworkIPv4Subnet, netPrefixLengthPerNode))
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				gomega.Expect(inRange(subnet, denyServerPodIP)).To(gomega.Succeed())

				ginkgo.By("asserting the *client* pod can contact the allow server pod exposed endpoint")
				namespacePodShouldReach(oc, clientPodConfig.namespace, clientPodConfig.name, formatHostAndPort(net.ParseIP(allowServerPodIP), port))

				ginkgo.By("asserting the *client* pod can contact the deny server pod exposed endpoint")
				namespacePodShouldReach(oc, clientPodConfig.namespace, clientPodConfig.name, formatHostAndPort(net.ParseIP(denyServerPodIP), port))

				ginkgo.By("creating a \"default deny\" network policy")
				_, err = makeDenyAllPolicy(f, namespaceYellow)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())

				ginkgo.By("asserting the *client* pod can not contact the allow server pod exposed endpoint")
				namespacePodShouldNotReach(oc, clientPodConfig.namespace, clientPodConfig.name, formatHostAndPort(net.ParseIP(allowServerPodIP), port))

				ginkgo.By("asserting the *client* pod can not contact the deny server pod exposed endpoint")
				namespacePodShouldNotReach(oc, clientPodConfig.namespace, clientPodConfig.name, formatHostAndPort(net.ParseIP(denyServerPodIP), port))
ginkgo.By("creating a \"allow-traffic-to-pod\" network policy") | ||
				_, err = allowTrafficToPodFromNamespacePolicy(f, namespaceYellow, namespaceBlue, "allow-traffic-to-pod", allowServerPodLabel)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())

				ginkgo.By("asserting the *client* pod can contact the allow server pod exposed endpoint")
				namespacePodShouldReach(oc, clientPodConfig.namespace, clientPodConfig.name, formatHostAndPort(net.ParseIP(allowServerPodIP), port))

				ginkgo.By("asserting the *client* pod can not contact the deny server pod exposed endpoint")
				namespacePodShouldNotReach(oc, clientPodConfig.namespace, clientPodConfig.name, formatHostAndPort(net.ParseIP(denyServerPodIP), port))
			},
			ginkgo.Entry(
				"in L2 primary UDN",
				"layer2",
				*podConfig(
					"client-pod",
				),
				*podConfig(
					"allow-server-pod",
					withCommand(func() []string {
						return httpServerContainerCmd(port)
					}),
					withLabels(allowServerPodLabel),
				),
				*podConfig(
					"deny-server-pod",
					withCommand(func() []string {
						return httpServerContainerCmd(port)
					}),
					withLabels(denyServerPodLabel),
				),
			),
			ginkgo.Entry(
				"in L3 primary UDN",
				"layer3",
				*podConfig(
					"client-pod",
				),
				*podConfig(
					"allow-server-pod",
					withCommand(func() []string {
						return httpServerContainerCmd(port)
					}),
					withLabels(allowServerPodLabel),
				),
				*podConfig(
					"deny-server-pod",
					withCommand(func() []string {
						return httpServerContainerCmd(port)
					}),
					withLabels(denyServerPodLabel),
				),
			))
	})
})

func getNamespaceName(f *framework.Framework, nsSuffix string) string {
	return fmt.Sprintf("%s-%s", f.Namespace.Name, nsSuffix)
}

func allowTrafficToPodFromNamespacePolicy(f *framework.Framework, namespace, fromNamespace, policyName string, podLabel map[string]string) (*knet.NetworkPolicy, error) {
	policy := &knet.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name: policyName,
		},
		Spec: knet.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{MatchLabels: podLabel},
			PolicyTypes: []knet.PolicyType{knet.PolicyTypeIngress},
			Ingress: []knet.NetworkPolicyIngressRule{{From: []knet.NetworkPolicyPeer{
				{NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/metadata.name": fromNamespace}}}}}},
		},
	}
	return f.ClientSet.NetworkingV1().NetworkPolicies(namespace).Create(context.TODO(), policy, metav1.CreateOptions{})
}
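makeDenyAllPolicy, used by both tables above, is defined elsewhere in the suite. A minimal sketch of what such a helper might look like, assuming it denies all ingress via an empty pod selector with no ingress rules (the name and exact shape are assumptions, mirroring allowTrafficToPodFromNamespacePolicy above):

	// Hypothetical sketch; the real makeDenyAllPolicy may differ.
	func makeDenyAllPolicySketch(f *framework.Framework, namespace string) (*knet.NetworkPolicy, error) {
		policy := &knet.NetworkPolicy{
			ObjectMeta: metav1.ObjectMeta{
				Name: "deny-all",
			},
			Spec: knet.NetworkPolicySpec{
				// An empty PodSelector selects every pod in the namespace; declaring
				// PolicyTypeIngress with no Ingress rules denies all inbound traffic.
				PodSelector: metav1.LabelSelector{},
				PolicyTypes: []knet.PolicyType{knet.PolicyTypeIngress},
			},
		}
		return f.ClientSet.NetworkingV1().NetworkPolicies(namespace).Create(context.TODO(), policy, metav1.CreateOptions{})
	}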
Review comment (ricky-rav): Your version is totally equivalent, but I think it's cleaner to just pass "default" as the namespace, instead of relying on the empty string.
Reply: @ricky-rav passing an empty string for the namespace is intentional here, so that oc exec uses the pod from the f.Namespace.Name namespace.
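To make the convention under discussion concrete, the two call forms differ only in whether -n is passed to oc exec; a short illustration (pod name and address are hypothetical):

	// Resolves the pod in the CLI's current namespace, i.e. f.Namespace.Name:
	podShouldReach(oc, "client-pod", "203.203.1.5:9000")

	// Adds -n, targeting the pod in an explicitly named namespace:
	namespacePodShouldReach(oc, "e2e-udn-blue", "client-pod", "203.203.1.5:9000")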