diff --git a/plugins/npa-v1alpha2/clusternetworkpolicy.go b/plugins/npa-v1alpha2/clusternetworkpolicy.go
index f63a0f63..a580a472 100644
--- a/plugins/npa-v1alpha2/clusternetworkpolicy.go
+++ b/plugins/npa-v1alpha2/clusternetworkpolicy.go
@@ -144,24 +144,37 @@ func (c *ClusterNetworkPolicy) evaluateClusterEgress(
 ) npav1alpha2.ClusterNetworkPolicyRuleAction {
 	for _, policy := range policies {
 		for _, rule := range policy.Spec.Egress {
-			if rule.Ports != nil {
-				if !evaluateClusterNetworkPolicyPort(*rule.Ports, dstPod, dstPort, protocol) {
+			// A rule matches if both its ports and peers match.
+			// 1. Check ports
+			if rule.Protocols != nil {
+				if !evaluateClusterNetworkPolicyProtocols(*rule.Protocols, dstPod, dstPort, protocol) {
 					continue
 				}
 			}

+			// 2. Check peers
+			// An empty 'To' slice matches all destinations.
+			if len(rule.To) == 0 {
+				return rule.Action
+			}
+
+			// If 'To' is not empty, at least one peer must match.
+			peerMatches := false
 			for _, to := range rule.To {
 				if to.Namespaces != nil && dstPod != nil &&
 					networkpolicy.MatchesSelector(to.Namespaces, dstPod.Namespace.Labels) {
-					return rule.Action
+					peerMatches = true
+					break
 				}
 				if to.Pods != nil && dstPod != nil &&
 					networkpolicy.MatchesSelector(&to.Pods.NamespaceSelector, dstPod.Namespace.Labels) &&
 					networkpolicy.MatchesSelector(&to.Pods.PodSelector, dstPod.Labels) {
-					return rule.Action
+					peerMatches = true
+					break
 				}
 				if to.Nodes != nil && dstPod != nil &&
 					networkpolicy.MatchesSelector(to.Nodes, dstPod.Node.Labels) {
-					return rule.Action
+					peerMatches = true
+					break
 				}
 				for _, network := range to.Networks {
@@ -170,14 +183,27 @@ func (c *ClusterNetworkPolicy) evaluateClusterEgress(
 						continue
 					}
 					if cidr.Contains(dstIP) {
-						return rule.Action
+						peerMatches = true
+						break
 					}
 				}
+				if peerMatches {
+					break
+				}
+
 				for _, domain := range to.DomainNames {
 					if c.domainResolver != nil && c.domainResolver.ContainsIP(string(domain), dstIP) {
-						return rule.Action
+						peerMatches = true
+						break
 					}
 				}
+				if peerMatches {
+					break
+				}
+			}
+
+			if peerMatches {
+				return rule.Action
 			}
 		}
 	}
@@ -197,63 +223,97 @@ func (c *ClusterNetworkPolicy) evaluateClusterIngress(
 	}
 	for _, policy := range policies {
 		for _, rule := range policy.Spec.Ingress {
-			if rule.Ports != nil {
-				if !evaluateClusterNetworkPolicyPort(*rule.Ports, dstPod, dstPort, protocol) {
+			// A rule matches if both its ports and peers match.
+			// 1. Check ports and protocols
+			if rule.Protocols != nil {
+				if !evaluateClusterNetworkPolicyProtocols(*rule.Protocols, dstPod, dstPort, protocol) {
 					continue
 				}
 			}
+
+			// 2. Check peers
+			// An empty 'From' slice matches all sources.
+			if len(rule.From) == 0 {
+				return rule.Action
+			}
+
+			// If 'From' is not empty, at least one peer must match.
+			peerMatches := false
 			for _, from := range rule.From {
 				if from.Namespaces != nil &&
 					networkpolicy.MatchesSelector(from.Namespaces, srcPod.Namespace.Labels) {
-					return rule.Action
+					peerMatches = true
+					break
 				}
 				if from.Pods != nil &&
 					networkpolicy.MatchesSelector(&from.Pods.NamespaceSelector, srcPod.Namespace.Labels) &&
 					networkpolicy.MatchesSelector(&from.Pods.PodSelector, srcPod.Labels) {
-					return rule.Action
+					peerMatches = true
+					break
 				}
 			}
+
+			if peerMatches {
+				return rule.Action
+			}
 		}
 	}
 	return npav1alpha2.ClusterNetworkPolicyRuleActionPass
 }

-// evaluateClusterNetworkPolicyPort checks if a specific port and protocol match any port selectors.
-func evaluateClusterNetworkPolicyPort(
-	policyPorts []npav1alpha2.ClusterNetworkPolicyPort,
+// evaluateClusterNetworkPolicyProtocols checks if a specific port and protocol
+// match any port selectors.
+func evaluateClusterNetworkPolicyProtocols(
+	protocols []npav1alpha2.ClusterNetworkPolicyProtocol,
 	pod *api.PodInfo,
 	port int,
 	protocol v1.Protocol,
 ) bool {
-	if len(policyPorts) == 0 {
-		return true
+	if len(protocols) == 0 {
+		return false
 	}

-	for _, policyPort := range policyPorts {
-		if policyPort.PortNumber != nil &&
-			policyPort.PortNumber.Port == int32(port) &&
-			policyPort.PortNumber.Protocol == protocol {
+	for _, policy := range protocols {
+		if evaluateProtocolPort(policy, pod, int32(port), protocol) {
 			return true
 		}
+	}

-		if policyPort.NamedPort != nil {
-			if pod == nil {
-				continue
-			}
-			for _, containerPort := range pod.ContainerPorts {
-				if containerPort.Name == *policyPort.NamedPort && v1.Protocol(containerPort.Protocol) == protocol && containerPort.Port == int32(port) {
-					return true
-				}
+	return false
+}
+
+func evaluateProtocolPort(
+	policy npav1alpha2.ClusterNetworkPolicyProtocol,
+	pod *api.PodInfo,
+	port int32,
+	protocol v1.Protocol,
+) bool {
+	if policy.Protocol != protocol {
+		return false
+	}
+
+	switch {
+	case policy.Port.Number != nil:
+		return *policy.Port.Number == port
+
+	case policy.Port.Name != nil:
+		if pod == nil {
+			return false
+		}
+		for _, containerPort := range pod.ContainerPorts {
+			nameOk := containerPort.Name == *policy.Port.Name
+			portOk := containerPort.Port == port
+			if nameOk && portOk {
+				return true
 			}
 		}
-		if policyPort.PortRange != nil &&
-			policyPort.PortRange.Protocol == protocol &&
-			policyPort.PortRange.Start <= int32(port) &&
-			policyPort.PortRange.End >= int32(port) {
+	case policy.Port.Range != nil:
+		if policy.Port.Range.Start <= port && port <= policy.Port.Range.End {
 			return true
 		}
 	}
+
 	return false
 }
diff --git a/plugins/npa-v1alpha2/clusternetworkpolicy_test.go b/plugins/npa-v1alpha2/clusternetworkpolicy_test.go
index 4a3e070a..8222e81b 100644
--- a/plugins/npa-v1alpha2/clusternetworkpolicy_test.go
+++ b/plugins/npa-v1alpha2/clusternetworkpolicy_test.go
@@ -11,7 +11,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/component-base/logs"
 	"k8s.io/klog/v2"
-	"k8s.io/utils/ptr"

 	"sigs.k8s.io/kube-network-policies/pkg/api"
 	"sigs.k8s.io/kube-network-policies/pkg/network"
@@ -154,6 +153,14 @@ func Test_ClusterNetworkPolicy_Evaluation(t *testing.T) {
 		}}
 	})

+	adminDenyAllEgressEmptyTo := makeClusterNetworkPolicy("admin-deny-all-egress-empty-to", npav1alpha2.AdminTier, 100, func(p *npav1alpha2.ClusterNetworkPolicy) {
+		p.Spec.Subject = npav1alpha2.ClusterNetworkPolicySubject{Namespaces: &metav1.LabelSelector{}}
+		p.Spec.Egress = []npav1alpha2.ClusterNetworkPolicyEgressRule{{
+			Action: npav1alpha2.ClusterNetworkPolicyRuleActionDeny,
+			To:     []npav1alpha2.ClusterNetworkPolicyEgressPeer{},
+		}}
+	})
+
 	adminAllowEgressToNSBar := makeClusterNetworkPolicy("admin-allow-egress-to-ns-bar", npav1alpha2.AdminTier, 50, func(p *npav1alpha2.ClusterNetworkPolicy) {
 		p.Spec.Subject = npav1alpha2.ClusterNetworkPolicySubject{Namespaces: &metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/metadata.name": "ns-foo"}}}
 		p.Spec.Egress = []npav1alpha2.ClusterNetworkPolicyEgressRule{{
@@ -171,6 +178,14 @@ func Test_ClusterNetworkPolicy_Evaluation(t *testing.T) {
 		}}
 	})

+	baselineDenyAllIngressEmptyFrom := makeClusterNetworkPolicy("baseline-deny-all-ingress-empty-from", npav1alpha2.BaselineTier, 100, func(p *npav1alpha2.ClusterNetworkPolicy) {
+		p.Spec.Subject = npav1alpha2.ClusterNetworkPolicySubject{Namespaces: &metav1.LabelSelector{}}
+		p.Spec.Ingress = []npav1alpha2.ClusterNetworkPolicyIngressRule{{
+			Action: npav1alpha2.ClusterNetworkPolicyRuleActionDeny,
+			From:   []npav1alpha2.ClusterNetworkPolicyIngressPeer{},
+		}}
+	})
+
 	baselineAllowIngressFromNSFoo := makeClusterNetworkPolicy("baseline-allow-ingress-from-ns-foo", npav1alpha2.BaselineTier, 50, func(p *npav1alpha2.ClusterNetworkPolicy) {
 		p.Spec.Subject = npav1alpha2.ClusterNetworkPolicySubject{Namespaces: &metav1.LabelSelector{MatchLabels: map[string]string{"team": "bar"}}}
 		p.Spec.Ingress = []npav1alpha2.ClusterNetworkPolicyIngressRule{{
@@ -187,19 +202,26 @@ func Test_ClusterNetworkPolicy_Evaluation(t *testing.T) {
 		wantBase  api.Verdict
 	}{
 		{
-			name:      "Admin: Deny Egress overrides lower priority Allow",
+			name:      "Admin: Higher priority Allow Egress rule is effective",
 			policies:  []*npav1alpha2.ClusterNetworkPolicy{adminDenyAllEgress, adminAllowEgressToNSBar},
 			packet:    network.Packet{SrcIP: net.ParseIP("192.168.1.11"), DstIP: net.ParseIP("192.168.2.22")},
 			wantAdmin: api.VerdictAccept, // Because adminAllowEgressToNSBar has higher priority (50 < 100)
 			wantBase:  api.VerdictNext,
 		},
 		{
-			name:      "Admin: No matching policy should pass",
+			name:      "Admin: Deny-all Egress policy is effective",
 			policies:  []*npav1alpha2.ClusterNetworkPolicy{adminDenyAllEgress}, // This policy applies to all pods
 			packet:    network.Packet{SrcIP: net.ParseIP("192.168.1.11"), DstIP: net.ParseIP("192.168.3.33")},
 			wantAdmin: api.VerdictDeny,
 			wantBase:  api.VerdictNext,
 		},
+		{
+			name:      "Admin: Deny Egress with empty To rule",
+			policies:  []*npav1alpha2.ClusterNetworkPolicy{adminDenyAllEgressEmptyTo},
+			packet:    network.Packet{SrcIP: net.ParseIP("192.168.1.11"), DstIP: net.ParseIP("192.168.2.22")},
+			wantAdmin: api.VerdictDeny,
+			wantBase:  api.VerdictNext,
+		},
 		{
 			name:      "Baseline: Allow Ingress from specific namespace",
 			policies:  []*npav1alpha2.ClusterNetworkPolicy{baselineDenyAllIngress, baselineAllowIngressFromNSFoo},
@@ -214,6 +236,13 @@ func Test_ClusterNetworkPolicy_Evaluation(t *testing.T) {
 			wantAdmin: api.VerdictNext,
 			wantBase:  api.VerdictDeny, // Denied by baselineDenyAllIngress
 		},
+		{
+			name:      "Baseline: Deny Ingress with empty From rule",
+			policies:  []*npav1alpha2.ClusterNetworkPolicy{baselineDenyAllIngressEmptyFrom},
+			packet:    network.Packet{SrcIP: net.ParseIP("192.168.1.11"), DstIP: net.ParseIP("192.168.2.22")},
+			wantAdmin: api.VerdictNext,
+			wantBase:  api.VerdictDeny,
+		},
 		{
 			name:      "No policies applied",
 			policies:  []*npav1alpha2.ClusterNetworkPolicy{},
@@ -279,67 +308,350 @@ func Test_ClusterNetworkPolicy_Evaluation(t *testing.T) {

 // verdictToBool simplifies comparison for tests where the exact verdict doesn't matter, only allow/deny.
 func verdictToBool(v api.Verdict) bool {
-	return v == api.VerdictAccept || v == api.VerdictNext
+	switch v {
+	case api.VerdictAccept:
+		return true
+	case api.VerdictDeny:
+		return false
+	case api.VerdictNext:
+		// For tests, we can treat 'Next' as an implicit allow, as the chain stops here.
+		return true
+	default:
+		return false
+	}
 }

 // --- Port Evaluation Test ---

-func Test_evaluateClusterNetworkPolicyPort(t *testing.T) {
+func Test_evaluateClusterNetworkPolicyProtocols(t *testing.T) {
+	pod := makePod("test-pod", "test-ns", "1.2.3.4")
+	pi := func(x int32) *int32 { return &x }
+	ps := func(x string) *string { return &x }
+
 	tests := []struct {
-		name        string
-		policyPorts []npav1alpha2.ClusterNetworkPolicyPort
-		pod         *v1.Pod
-		port        int
-		protocol    v1.Protocol
-		want        bool
+		name     string
+		policy   []npav1alpha2.ClusterNetworkPolicyProtocol
+		port     int
+		protocol v1.Protocol
+		want     bool
 	}{
 		{
-			name: "empty ports match all",
-			pod:  makePod("test", "nstest", "192.168.1.1"),
-			want: true,
+			name:     "empty",
+			policy:   []npav1alpha2.ClusterNetworkPolicyProtocol{},
+			port:     80,
+			protocol: v1.ProtocolTCP,
+			want:     false,
 		},
 		{
-			name:        "match port number",
-			policyPorts: []npav1alpha2.ClusterNetworkPolicyPort{{
-				PortNumber: &npav1alpha2.Port{Protocol: v1.ProtocolTCP, Port: 80},
-			}},
+			name: "one policy, match",
+			policy: []npav1alpha2.ClusterNetworkPolicyProtocol{
+				{
+					Protocol: v1.ProtocolTCP,
+					Port: &npav1alpha2.ClusterNetworkPolicyPort{
+						Number: pi(80),
+					},
+				},
+			},
 			port:     80,
 			protocol: v1.ProtocolTCP,
 			want:     true,
 		},
+		{
+			name: "one policy, no match",
+			policy: []npav1alpha2.ClusterNetworkPolicyProtocol{
+				{
+					Protocol: v1.ProtocolTCP,
+					Port: &npav1alpha2.ClusterNetworkPolicyPort{
+						Number: pi(8080),
+					},
+				},
+			},
+			port:     80,
+			protocol: v1.ProtocolTCP,
+			want:     false,
+		},
+		{
+			name: "multiple policies, match",
+			policy: []npav1alpha2.ClusterNetworkPolicyProtocol{
+				{
+					Protocol: v1.ProtocolTCP,
+					Port: &npav1alpha2.ClusterNetworkPolicyPort{
+						Number: pi(8080),
+					},
+				},
+				{
+					Protocol: v1.ProtocolTCP,
+					Port: &npav1alpha2.ClusterNetworkPolicyPort{
+						Number: pi(80),
+					},
+				},
+			},
+			port:     80,
+			protocol: v1.ProtocolTCP,
+			want:     true,
+		},
+		{
+			name: "multiple policies, no match",
+			policy: []npav1alpha2.ClusterNetworkPolicyProtocol{
+				{
+					Protocol: v1.ProtocolTCP,
+					Port: &npav1alpha2.ClusterNetworkPolicyPort{
+						Number: pi(8080),
+					},
+				},
+				{
+					Protocol: v1.ProtocolUDP,
+					Port: &npav1alpha2.ClusterNetworkPolicyPort{
+						Number: pi(80),
+					},
+				},
+			},
+			port:     80,
+			protocol: v1.ProtocolTCP,
+			want:     false,
+		},
 		{
 			name: "match named port",
-			policyPorts: []npav1alpha2.ClusterNetworkPolicyPort{{
-				NamedPort: ptr.To[string]("http"),
-			}},
-			pod:      makePod("test", "nstest", "192.168.1.1"),
+			policy: []npav1alpha2.ClusterNetworkPolicyProtocol{
+				{
+					Protocol: v1.ProtocolTCP,
+					Port: &npav1alpha2.ClusterNetworkPolicyPort{
+						Name: ps("http"),
+					},
+				},
+			},
 			port:     80,
 			protocol: v1.ProtocolTCP,
 			want:     true,
 		},
 		{
 			name: "match port range",
-			policyPorts: []npav1alpha2.ClusterNetworkPolicyPort{{
-				PortRange: &npav1alpha2.PortRange{Protocol: v1.ProtocolTCP, Start: 80, End: 90},
-			}},
+			policy: []npav1alpha2.ClusterNetworkPolicyProtocol{
+				{
+					Protocol: v1.ProtocolTCP,
+					Port: &npav1alpha2.ClusterNetworkPolicyPort{
+						Range: &npav1alpha2.PortRange{Start: 80, End: 90},
+					},
+				},
+			},
 			port:     85,
 			protocol: v1.ProtocolTCP,
 			want:     true,
 		},
 		{
-			name:        "no match port number",
-			policyPorts: []npav1alpha2.ClusterNetworkPolicyPort{{
-				PortNumber: &npav1alpha2.Port{Protocol: v1.ProtocolTCP, Port: 80},
-			}},
-			port: 443,
+			name: "mixed types, match",
+			policy: []npav1alpha2.ClusterNetworkPolicyProtocol{
+				{
+					Protocol: v1.ProtocolTCP,
+					Port: &npav1alpha2.ClusterNetworkPolicyPort{
+						Number: pi(9090),
+					},
+				},
+				{
+					Protocol: v1.ProtocolTCP,
+					Port: &npav1alpha2.ClusterNetworkPolicyPort{
+						Name: ps("http"),
+					},
+				},
+				{
+					Protocol: v1.ProtocolTCP,
+					Port: &npav1alpha2.ClusterNetworkPolicyPort{
+						Range: &npav1alpha2.PortRange{Start: 100, End: 200},
+					},
+				},
+			},
+			port:     80,
 			protocol: v1.ProtocolTCP,
+			want:     true,
+		},
+	}
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			podInfo := api.NewPodInfo(pod, makeNamespace("foo").Labels, makeNode("testnode").Labels, "id")
+			if got := evaluateClusterNetworkPolicyProtocols(tc.policy, podInfo, tc.port, tc.protocol); got != tc.want {
+				t.Errorf("evaluateClusterNetworkPolicyProtocols() = %v, want %v", got, tc.want)
+			}
+		})
+	}
+}
+
+func Test_evaluateProtocolPort(t *testing.T) {
+	const (
+		tcp v1.Protocol = v1.ProtocolTCP
+		udp v1.Protocol = v1.ProtocolUDP
+	)
+
+	pod := makePod("test", "nstest", "192.168.1.1")
+	podInfo := api.NewPodInfo(pod, makeNamespace("foo").Labels, makeNode("testnode").Labels, "id")
+
+	pi := func(x int32) *int32 { return &x }
+	ps := func(x string) *string { return &x }
+
+	tests := []struct {
+		name     string
+		policy   npav1alpha2.ClusterNetworkPolicyProtocol
+		pod      *api.PodInfo
+		port     int32
+		protocol v1.Protocol
+		want     bool
+	}{
+		{
+			name: "match port number",
+			policy: npav1alpha2.ClusterNetworkPolicyProtocol{
+				Protocol: tcp,
+				Port: &npav1alpha2.ClusterNetworkPolicyPort{
+					Number: pi(80),
+				},
+			},
+			pod:      podInfo,
+			port:     80,
+			protocol: tcp,
+			want:     true,
+		},
+		{
+			name: "no match wrong port number",
+			policy: npav1alpha2.ClusterNetworkPolicyProtocol{
+				Protocol: tcp,
+				Port: &npav1alpha2.ClusterNetworkPolicyPort{
+					Number: pi(80),
+				},
+			},
+			pod:      podInfo,
+			port:     8080,
+			protocol: tcp,
+			want:     false,
+		},
+		{
+			name: "no match wrong protocol",
+			policy: npav1alpha2.ClusterNetworkPolicyProtocol{
+				Protocol: tcp,
+				Port: &npav1alpha2.ClusterNetworkPolicyPort{
+					Number: pi(80),
+				},
+			},
+			pod:      podInfo,
+			port:     80,
+			protocol: udp,
+			want:     false,
+		},
+		{
+			name: "match named port",
+			policy: npav1alpha2.ClusterNetworkPolicyProtocol{
+				Protocol: tcp,
+				Port: &npav1alpha2.ClusterNetworkPolicyPort{
+					Name: ps("http"),
+				},
+			},
+			pod:      podInfo,
+			port:     80,
+			protocol: tcp,
+			want:     true,
+		},
+		{
+			name: "no match wrong named port",
+			policy: npav1alpha2.ClusterNetworkPolicyProtocol{
+				Protocol: tcp,
+				Port: &npav1alpha2.ClusterNetworkPolicyPort{
+					Name: ps("no-match"),
+				},
+			},
+			pod:      podInfo,
+			port:     80,
+			protocol: tcp,
+			want:     false,
+		},
+		{
+			name: "match port range",
+			policy: npav1alpha2.ClusterNetworkPolicyProtocol{
+				Protocol: tcp,
+				Port: &npav1alpha2.ClusterNetworkPolicyPort{
+					Range: &npav1alpha2.PortRange{Start: 80, End: 90},
+				},
+			},
+			pod:      podInfo,
+			port:     85,
+			protocol: tcp,
+			want:     true,
+		},
+		{
+			name: "match port range start",
+			policy: npav1alpha2.ClusterNetworkPolicyProtocol{
+				Protocol: tcp,
+				Port: &npav1alpha2.ClusterNetworkPolicyPort{
+					Range: &npav1alpha2.PortRange{Start: 80, End: 90},
+				},
+			},
+			pod:      podInfo,
+			port:     80,
+			protocol: tcp,
+			want:     true,
+		},
+		{
+			name: "match port range end",
+			policy: npav1alpha2.ClusterNetworkPolicyProtocol{
+				Protocol: tcp,
+				Port: &npav1alpha2.ClusterNetworkPolicyPort{
+					Range: &npav1alpha2.PortRange{Start: 80, End: 90},
+				},
+			},
+			pod:      podInfo,
+			port:     90,
+			protocol: tcp,
+			want:     true,
+		},
+		{
+			name: "no match port range below",
+			policy: npav1alpha2.ClusterNetworkPolicyProtocol{
+				Protocol: tcp,
+				Port: &npav1alpha2.ClusterNetworkPolicyPort{
+					Range: &npav1alpha2.PortRange{Start: 80, End: 90},
+				},
+			},
+			pod:      podInfo,
+			port:     79,
+			protocol: tcp,
+			want:     false,
+		},
+		{
+			name: "no match port range above",
+			policy: npav1alpha2.ClusterNetworkPolicyProtocol{
+				Protocol: tcp,
+				Port: &npav1alpha2.ClusterNetworkPolicyPort{
+					Range: &npav1alpha2.PortRange{Start: 80, End: 90},
+				},
+			},
+			pod:      podInfo,
+			port:     91,
+			protocol: tcp,
+			want:     false,
+		},
+		{
+			name: "no match port range wrong protocol",
+			policy: npav1alpha2.ClusterNetworkPolicyProtocol{
+				Protocol: tcp,
+				Port: &npav1alpha2.ClusterNetworkPolicyPort{
+					Range: &npav1alpha2.PortRange{Start: 80, End: 90},
+				},
+			},
+			pod:      podInfo,
+			port:     85,
+			protocol: udp,
+			want:     false,
+		},
+		{
+			name:     "empty policy",
+			policy:   npav1alpha2.ClusterNetworkPolicyProtocol{},
+			pod:      podInfo,
+			port:     1234,
+			protocol: tcp,
 			want:     false,
 		},
 	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			podInfo := api.NewPodInfo(tt.pod, makeNamespace("foo").Labels, makeNode("testnode").Labels, "id")
-			if got := evaluateClusterNetworkPolicyPort(tt.policyPorts, podInfo, tt.port, tt.protocol); got != tt.want {
-				t.Errorf("evaluateClusterNetworkPolicyPort() = %v, want %v", got, tt.want)
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			got := evaluateProtocolPort(tc.policy, tc.pod, tc.port, tc.protocol)
+			if got != tc.want {
+				t.Errorf("evaluateProtocolPort() = %v, want %v", got, tc.want)
 			}
 		})
 	}
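Reviewer note (not part of the diff): the evaluation order the patch settles on — ports/protocols first, then peers, with an empty 'To'/'From' list matching every peer and a non-empty list requiring at least one matching peer — can be illustrated with the standalone Go sketch below. The peerRule and peer types here are hypothetical simplifications, not the npav1alpha2 API.

package main

import "fmt"

// peer is a hypothetical stand-in for one egress/ingress peer selector
// (namespaces, pods, nodes, networks, domain names).
type peer struct {
	matches bool
}

// peerRule mirrors the shape of a ClusterNetworkPolicy egress/ingress rule:
// an action plus a (possibly empty) list of peers.
type peerRule struct {
	action string
	peers  []peer // empty slice matches every destination/source
}

// evaluate sketches the patched flow: assuming the rule's protocols/ports
// already matched, an empty peer list matches all peers; otherwise at least
// one peer must match before the rule's action is returned.
func evaluate(rules []peerRule) string {
	for _, rule := range rules {
		if len(rule.peers) == 0 {
			return rule.action
		}
		for _, p := range rule.peers {
			if p.matches {
				return rule.action
			}
		}
	}
	return "Pass" // no rule matched; defer to the next tier
}

func main() {
	denyAll := peerRule{action: "Deny"} // empty peer list => matches everything
	allowOne := peerRule{action: "Allow", peers: []peer{{matches: true}}}

	fmt.Println(evaluate([]peerRule{allowOne, denyAll})) // Allow
	fmt.Println(evaluate([]peerRule{denyAll}))           // Deny
	fmt.Println(evaluate(nil))                           // Pass
}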