-
Notifications
You must be signed in to change notification settings - Fork 4.8k
test/extended: Add MultiNetworkPolicy test case #27449
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,154 @@ | ||
| package networking | ||
|
|
||
| import ( | ||
| "context" | ||
| "fmt" | ||
|
|
||
| g "github.com/onsi/ginkgo/v2" | ||
| o "github.com/onsi/gomega" | ||
|
|
||
| operatorv1 "github.com/openshift/api/operator/v1" | ||
| operatorclientv1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1" | ||
| v1 "k8s.io/api/core/v1" | ||
| metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
| frameworkpod "k8s.io/kubernetes/test/e2e/framework/pod" | ||
|
|
||
| admissionapi "k8s.io/pod-security-admission/api" | ||
|
|
||
| exutil "github.com/openshift/origin/test/extended/util" | ||
| ) | ||
|
|
||
| var _ = g.Describe("[sig-network][Feature:MultiNetworkPolicy]", func() { | ||
|
|
||
| oc := exutil.NewCLIWithPodSecurityLevel("multinetpol-e2e", admissionapi.LevelBaseline) | ||
| f := oc.KubeFramework() | ||
|
|
||
| var previousUseMultiNetworkPolicy bool | ||
| g.BeforeEach(func() { | ||
| previousUseMultiNetworkPolicy = isMultinetNetworkPolicyEnabled(oc) | ||
| }) | ||
|
|
||
| g.AfterEach(func() { | ||
| if !previousUseMultiNetworkPolicy { | ||
| disableMultiNetworkPolicy(oc) | ||
| } | ||
| }) | ||
|
|
||
| g.It("should enforce a network policies on secondary network", func() { | ||
| var err error | ||
| ns := f.Namespace.Name | ||
|
|
||
| g.By("creating a macvlan net-attach-def") | ||
| nad_yaml := exutil.FixturePath("testdata", "net-attach-defs", "macvlan-nad.yml") | ||
| err = oc.AsAdmin().Run("create").Args("-f", nad_yaml).Execute() | ||
| o.Expect(err).NotTo(o.HaveOccurred()) | ||
|
|
||
| g.By("launching pod with an annotation to use the net-attach-def") | ||
| networksTemplate := `[ | ||
| { | ||
| "name": "macvlan1-nad", | ||
| "ips": ["%s"] | ||
| } | ||
| ]` | ||
|
|
||
| podName := "multinetpol-test-pod-" | ||
|
|
||
| frameworkpod.CreateExecPodOrFail(f.ClientSet, ns, podName, func(pod *v1.Pod) { | ||
| pod.Spec.Containers[0].Args = []string{"net", "--serve", "192.0.2.1:8889"} | ||
| pod.ObjectMeta.Labels = map[string]string{"pod": "a"} | ||
| pod.ObjectMeta.Annotations = map[string]string{ | ||
| "k8s.v1.cni.cncf.io/networks": fmt.Sprintf(networksTemplate, "192.0.2.1/24")} | ||
| }) | ||
|
|
||
| testPodB := frameworkpod.CreateExecPodOrFail(f.ClientSet, ns, podName, func(pod *v1.Pod) { | ||
| pod.ObjectMeta.Labels = map[string]string{"pod": "b"} | ||
| pod.ObjectMeta.Annotations = map[string]string{ | ||
| "k8s.v1.cni.cncf.io/networks": fmt.Sprintf(networksTemplate, "192.0.2.2/24")} | ||
| }) | ||
|
|
||
| frameworkpod.CreateExecPodOrFail(f.ClientSet, ns, podName, func(pod *v1.Pod) { | ||
| pod.Spec.Containers[0].Args = []string{"net", "--serve", "192.0.2.3:8889"} | ||
| pod.ObjectMeta.Labels = map[string]string{"pod": "c"} | ||
| pod.ObjectMeta.Annotations = map[string]string{ | ||
| "k8s.v1.cni.cncf.io/networks": fmt.Sprintf(networksTemplate, "192.0.2.3/24")} | ||
| }) | ||
|
|
||
| g.By("checking podB can connnect to podA") | ||
| podShouldReach(oc, testPodB.Name, "192.0.2.1:8889") | ||
|
|
||
| g.By("enabling MultiNetworkPolicies on cluster") | ||
| enableMultiNetworkPolicy(oc) | ||
|
|
||
| g.By("creating a deny-all-ingress traffic to pod:a MultiNetworkPolicy") | ||
| multinetpolicy_yaml := exutil.FixturePath("testdata", "multinetpolicy", "deny-ingress-pod-a.yml") | ||
| err = oc.AsAdmin().Run("create").Args("-f", multinetpolicy_yaml).Execute() | ||
| o.Expect(err).To(o.Succeed()) | ||
|
|
||
| g.By("checking podB can NOT connnect to podA") | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I think you need to test more than this. As written, this test would pass even if the actual behavior was "activating MNP completely breaks all secondary networks" 😬
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Good point! To achieve it, I need to:
But it will make the test a little more complicated. Do you have any suggestions for this?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I refactored the test a little, now there is a deny-all-ingress rule applied only to pod-A, and I added another server pod-C. This way I can test pod-B can't connect to A but can still reach C. WDYT? |
||
| podShouldNotReach(oc, testPodB.Name, "192.0.2.1:8889") | ||
|
|
||
| g.By("checking podB can still connnect to podC") | ||
| podShouldReach(oc, testPodB.Name, "192.0.2.3:8889") | ||
| }) | ||
|
|
||
| }) | ||
|
|
||
| func enableMultiNetworkPolicy(oc *exutil.CLI) { | ||
| c := oc.AdminOperatorClient().OperatorV1().Networks() | ||
| config := getCluserNetwork(c) | ||
|
|
||
| b := true | ||
| config.Spec.UseMultiNetworkPolicy = &b | ||
|
|
||
| _, err := c.Update(context.Background(), config, metav1.UpdateOptions{}) | ||
| o.Expect(err).NotTo(o.HaveOccurred()) | ||
|
|
||
| o.Eventually(func() error { | ||
| return oc.AsAdmin().Run("get").Args("multi-networkpolicies.k8s.cni.cncf.io").Execute() | ||
| }, "30s", "2s").Should(o.Succeed()) | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This doesn't reliably indicate that MNP enforcement is in effect... that depends on deploying a DaemonSet as well, right? The timeouts help, but it would be better if there was a good way of ensuring that the DaemonSet is fully deployed, so the test doesn't end up flaking under heavy load. (But also, we don't want to make too many assumptions about exactly what CNO is doing when you enable the feature, especially since we might change to a different MNP implementation in the future... I'm not sure what the best approach here is...)
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I agree with your considerations, it's not easy to decide what's the best approach. From the user's perspective, turning on the feature means that MNP resource becomes available. If the Daemonset (or any other component) is not working or it is not started, then some other assertion will fail. As a further example, in the future we can also decide to keep the Daemonset up and running but not creating iptables if the flag is set to false. That said, I would keep it as simple as possible. |
||
| } | ||
|
|
||
| func disableMultiNetworkPolicy(oc *exutil.CLI) { | ||
| c := oc.AdminOperatorClient().OperatorV1().Networks() | ||
| config := getCluserNetwork(c) | ||
|
|
||
| b := false | ||
| config.Spec.UseMultiNetworkPolicy = &b | ||
|
|
||
| _, err := c.Update(context.Background(), config, metav1.UpdateOptions{}) | ||
| o.Expect(err).NotTo(o.HaveOccurred()) | ||
|
|
||
| o.Eventually(func() error { | ||
| return oc.AsAdmin().Run("get").Args("multi-networkpolicies.k8s.cni.cncf.io").Execute() | ||
| }, "20s", "1s").Should(o.HaveOccurred()) | ||
| } | ||
|
|
||
| func isMultinetNetworkPolicyEnabled(oc *exutil.CLI) bool { | ||
| c := oc.AdminOperatorClient().OperatorV1().Networks() | ||
| config := getCluserNetwork(c) | ||
| return *config.Spec.UseMultiNetworkPolicy | ||
| } | ||
|
|
||
| func getCluserNetwork(c operatorclientv1.NetworkInterface) *operatorv1.Network { | ||
| ret, err := c.Get(context.Background(), "cluster", metav1.GetOptions{}) | ||
| o.Expect(err).NotTo(o.HaveOccurred()) | ||
| return ret | ||
| } | ||
|
|
||
| func podShouldReach(oc *exutil.CLI, podName, address string) { | ||
| out := "" | ||
| o.EventuallyWithOffset(1, func() error { | ||
| var err error | ||
| out, err = oc.AsAdmin().Run("exec").Args(podName, "--", "curl", "--connect-timeout", "1", address).Output() | ||
| return err | ||
| }, "30s", "1s").ShouldNot(o.HaveOccurred(), "cmd output: %s", out) | ||
| } | ||
|
|
||
| func podShouldNotReach(oc *exutil.CLI, podName, address string) { | ||
| out := "" | ||
| o.EventuallyWithOffset(1, func() error { | ||
| var err error | ||
| out, err = oc.AsAdmin().Run("exec").Args(podName, "--", "curl", "--connect-timeout", "1", address).Output() | ||
| return err | ||
| }, "30s", "1s").Should(o.HaveOccurred(), "cmd output: %s", out) | ||
| } | ||
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,11 @@ | ||
# Fixture: deny all ingress traffic, on the macvlan1-nad secondary network,
# to pods labeled pod=a. Used by the MultiNetworkPolicy e2e test.
apiVersion: k8s.cni.cncf.io/v1beta1
kind: MultiNetworkPolicy
metadata:
  # generateName avoids collisions if the fixture is created more than once.
  generateName: deny-ingress-pod-a
  annotations:
    # Scope enforcement of this policy to the macvlan secondary network only.
    k8s.v1.cni.cncf.io/policy-for: macvlan1-nad
spec:
  podSelector:
    matchLabels:
      pod: a
  # An empty ingress list denies all inbound traffic to the selected pods.
  ingress: []
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,16 @@ | ||
# Fixture: macvlan NetworkAttachmentDefinition providing the secondary
# network for the MultiNetworkPolicy e2e test. Static IPAM: pods choose
# their own address via the k8s.v1.cni.cncf.io/networks annotation
# (enabled by the "ips" capability below).
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: macvlan1-nad
spec:
  config: '{
      "cniVersion": "0.3.1",
      "name": "macvlan1-nad",
      "plugins": [
      {
        "type": "macvlan",
        "capabilities": { "ips": true },
        "mode": "bridge",
        "ipam": { "type": "static" }
      }]
    }'
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Would our tests be simpler if we added some NAD-related helper functions?
etc? Or something? (I don't remember exactly what the overlap is with the other NAD-related tests, but I know there are a lot of NAD-related tests at this point...)
(If you were going to do this, it would be best to have one commit first that adds the helpers and ports the existing tests to use them, and then a second commit adding the new MultiNetworkPolicy test, using the helpers.)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
um, wait, there's already a
`createNetworkAttachmentDefinition` method in `test/extended/networking/utils.go`.

There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yep, saw it. But it uses a dynamic client instead of YAML files. Is it OK to turn every NAD creation (I found 4 in the code base) into a call to
createNetworkAttachmentDefinition()? and then removing yaml files?