Skip to content

Instantly share code, notes, and snippets.

@jayunit100
Created February 6, 2020 15:45
Show Gist options
  • Select an option

  • Save jayunit100/7a5dc5d367f12d073b022faee840c548 to your computer and use it in GitHub Desktop.

Select an option

Save jayunit100/7a5dc5d367f12d073b022faee840c548 to your computer and use it in GitHub Desktop.
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"fmt"
"github.com/onsi/ginkgo"
)
/*
The following Network Policy tests verify that policy object definitions
are correctly enforced by a networking plugin. It accomplishes this by launching
a simple netcat server, and two clients with different
attributes. Each test case creates a network policy which should only allow
connections from one of the clients. The test then asserts that the clients
failed or successfully connected as expected.
*/
// The first test can be described using the following truth table entry
//
// NOTE(review): the pointers below are only assigned inside the SIGDescribe
// body later in this file, but several package-level var initializers below
// dereference them (e.g. *podServerLabelSelector, *nsBName) — as written that
// nil-panics during package init; confirm the intended initialization order.
var podServerLabelSelector *string // value of the server pod's "pod-name" label; set after the server pod is created
var nsAName *string // name of namespace A (the framework's namespace)
var nsBName *string // name of namespace B; set during suite setup
var nsCName *string // name of namespace C; set during suite setup
// TruthTableEntry pairs a NetworkPolicy with the set of connection pairs
// ("whitelist") that the policy is expected to allow, so each test can assert
// both the allowed and the denied cases.
type TruthTableEntry struct {
// OldDescription is the legacy ginkgo It() description this entry replaces.
OldDescription string
// Description is the new human-readable statement of what the policy allows.
Description string
// Policy is the NetworkPolicy under test.
Policy *networkingv1.NetworkPolicy
// Whitelist maps a connection-pair key (e.g. "BbAs") to true when that pair
// is expected to connect under Policy.
// NOTE(review): the key encoding (which letter is the client pod, which is
// the namespace) is not defined anywhere visible here — confirm with the
// test harness before relying on it.
Whitelist map[string]bool
}
// The following set of selectors and rules can be used to make selectors for peers.

// emptyLabelSelector matches everything: an empty LabelSelector selects all
// pods (or all namespaces) it is applied to.
var emptyLabelSelector = metav1.LabelSelector{}

// matchNotCLabelSelector matches any namespace whose "ns-name" label is NOT
// namespace C's name, i.e. "everything outside of namespace C".
//
// NOTE(review): nsCName is a *string that is only assigned inside the
// SIGDescribe body; dereferencing it here at package-init time nil-panics
// unless initialization is reordered — confirm before merging.
var matchNotCLabelSelector = []metav1.LabelSelectorRequirement{{
	Key:      "ns-name",
	Operator: metav1.LabelSelectorOpNotIn,
	// Fixed: the original passed the *string pointer itself into []string,
	// which does not compile; the pointed-to value is what is needed here.
	Values: []string{*nsCName},
}}
// used for the server, which is in every test
//
// NOTE(review): *podServerLabelSelector is dereferenced at package-init time,
// but the pointer is only assigned inside the SIGDescribe body — as written
// this nil-panics during init; confirm the intended ordering.
var serverPodSelector = metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": *podServerLabelSelector,
},
}
// clientBPodSelector selects the test pod labeled "pod-name: client-b".
var clientBPodSelector = metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": "client-b",
},
}
// namespaceBSelector selects namespace B by its "ns-name" label.
// NOTE(review): same init-time nil-pointer hazard as above (*nsBName).
var namespaceBSelector = metav1.LabelSelector{
MatchLabels: map[string]string{
"ns-name": *nsBName,
},
}
// The following set of IngressRules can be used to make network policy

// emptyIngressRule has no From and no Ports restrictions; per the
// NetworkPolicy API an empty ingress rule matches (allows) ALL traffic.
var emptyIngressRule = networkingv1.NetworkPolicyIngressRule{}
// ingressRuleClientA admits peers labeled "pod-name: client-a". A lone
// PodSelector peer only matches pods in the policy's own namespace.
var ingressRuleClientA = networkingv1.NetworkPolicyIngressRule{
From: []networkingv1.NetworkPolicyPeer{{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": "client-a",
},
},
}},
}
// ingressRuleNamespaceB admits any pod in namespaces carrying namespace B's
// "ns-name" label.
// NOTE(review): *nsBName is dereferenced at package-init time but only
// assigned inside SIGDescribe — nil-panic hazard; confirm init ordering.
var ingressRuleNamespaceB = networkingv1.NetworkPolicyIngressRule{
From: []networkingv1.NetworkPolicyPeer{{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns-name": *nsBName,
},
},
}},
}
// ingressRulePodSelectorMatchExpressionA admits peers whose "pod-name" label
// is in {"client-a"}, expressed as a MatchExpressions requirement rather than
// a MatchLabels map. Semantically equivalent to ingressRuleClientA.
var ingressRulePodSelectorMatchExpressionA = networkingv1.NetworkPolicyIngressRule{
	From: []networkingv1.NetworkPolicyPeer{{
		PodSelector: &metav1.LabelSelector{
			MatchExpressions: []metav1.LabelSelectorRequirement{{
				Key:      "pod-name",
				Operator: metav1.LabelSelectorOpIn,
				Values:   []string{"client-a"},
			}},
		},
	}},
}
// ingressRuleNameSelectorMatchExpressionNotC admits pods from any namespace
// whose "ns-name" label is NOT namespace C's (a NotIn match expression),
// i.e. all traffic except traffic originating in namespace C.
var ingressRuleNameSelectorMatchExpressionNotC = networkingv1.NetworkPolicyIngressRule{
From: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &metav1.LabelSelector{
MatchExpressions: matchNotCLabelSelector,
},
},
},
}
// DenyAll is a NP that denies all ingress traffic to every pod in the
// namespace, so its whitelist is empty.
//
// A NetworkPolicy denies by omission: a policy that selects pods but lists
// zero ingress rules isolates them completely. The previous version put an
// empty NetworkPolicyIngressRule{} in Ingress, which is the opposite — an
// empty rule has no From/Ports restrictions and therefore ALLOWS all traffic.
var DenyAll = &TruthTableEntry{
	OldDescription: "Should support a default-deny policy [Feature:NetworkPolicy]",
	Description:    "ALLOW no traffic to the server",
	Policy: &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name: "deny-all",
		},
		Spec: networkingv1.NetworkPolicySpec{
			// Select every pod in the namespace...
			PodSelector: emptyLabelSelector,
			// ...and state explicitly that this policy governs ingress, with
			// zero rules: nothing is allowed in.
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
			Ingress:     []networkingv1.NetworkPolicyIngressRule{},
		},
	},
	// The pods which are whitelisted by this policy: none.
	Whitelist: map[string]bool{},
}
// AllowAllInnerNamespace is a NP that allows local client 'a' via a pod selector
var AllowAllInnerNamespace = &TruthTableEntry{
OldDescription: "should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy]",
Description: "ALLOW inner-namespace, PodSelector traffic to the server [Feature:NetworkPolicy]",
Policy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-client-a-via-pod-selector",
},
Spec: networkingv1.NetworkPolicySpec{
// Applies only to the server pod.
PodSelector: serverPodSelector,
// A lone PodSelector peer matches only same-namespace pods, so only
// the local client-a is admitted.
Ingress: []networkingv1.NetworkPolicyIngressRule{
ingressRuleClientA,
},
},
},
// A lone pod selector will apply only to inner namespace traffic.
// NOTE(review): the "AaAs" key encoding is not defined in this file —
// presumably "client-a in ns A -> server in ns A"; confirm with harness.
Whitelist: map[string]bool{"AaAs": true},
}
// AllowNamespaceBViaNSSelector is a NP that allows traffic from any pod in
// namespace B via a NamespaceSelector. (The previous comment here was
// copy-pasted from the pod-selector entry.)
//
// NOTE(review): Description says "intra-namespace" while OldDescription and
// the rule itself describe traffic from a *different* namespace — the wording
// should be reconciled (Description is a runtime string, left unchanged here).
var AllowNamespaceBViaNSSelector = &TruthTableEntry{
OldDescription: "should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]",
Description: "ALLOW intra-namespace, NamespaceSelector traffic to the server [Feature:NetworkPolicy]",
Policy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-ns-b-via-namespace-selector",
},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: serverPodSelector,
Ingress: []networkingv1.NetworkPolicyIngressRule{
ingressRuleNamespaceB,
},
},
},
// NOTE(review): this comment was copied from the pod-selector entry; a
// NamespaceSelector admits pods from the selected namespaces, not only
// inner-namespace traffic. Verify these keys against the harness encoding.
Whitelist: map[string]bool{"AsAs": true, "BaAs": true, "CaAs": true},
}
// AllowClientAViaPodSelectorMatchingExpression is a NP that allows the local
// client-a pod via a PodSelector expressed with MatchExpressions. (The
// previous comment here was copy-pasted from a different entry.)
var AllowClientAViaPodSelectorMatchingExpression = &TruthTableEntry{
OldDescription: "should enforce policy based on PodSelector with MatchExpressions[Feature:NetworkPolicy]",
Description: "ALLOW PodSelector with MatchExpressions traffic to the server [Feature:NetworkPolicy]",
Policy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-client-a-via-pod-selector-with-match-expressions",
},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: serverPodSelector,
Ingress: []networkingv1.NetworkPolicyIngressRule{
ingressRulePodSelectorMatchExpressionA,
},
},
},
// A lone pod selector will apply only to inner namespace traffic.
// NOTE(review): a client-a selector whitelisting "BaAs"/"BbAs" (apparently
// B-keys) looks inconsistent with the policy above — confirm these keys.
Whitelist: map[string]bool{"BaAs": true, "BbAs": true},
}
// AllowClientBviaPodANDNamespace is a NP that admits only peers matching BOTH
// the client-b pod selector AND the namespace-b selector: a single
// NetworkPolicyPeer with both selectors set is a logical AND.
var AllowClientBviaPodANDNamespace = &TruthTableEntry{
	OldDescription: "should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]",
	Description:    "ALLOW (PodSelector inside of namespace) traffic to the server [Feature:NetworkPolicy]",
	Policy: &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			// NOTE(review): this name says "-or-" and is shared with the OR
			// entry below — consider renaming to reflect the AND semantics.
			Name: "allow-ns-b-via-namespace-selector-or-client-b-via-pod-selector",
		},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: serverPodSelector,
			Ingress: []networkingv1.NetworkPolicyIngressRule{
				{
					// One rule, one peer, two criteria (pod AND namespace selector).
					// Fixed: the original wrote field:value pairs directly inside the
					// slice literal (missing the inner struct braces) and passed the
					// selectors by value where *metav1.LabelSelector is required —
					// neither compiles.
					From: []networkingv1.NetworkPolicyPeer{
						{
							PodSelector:       &clientBPodSelector,
							NamespaceSelector: &namespaceBSelector,
						},
					},
				},
			},
		},
	},
	Whitelist: map[string]bool{"BbAs": true},
}
// AllowClientBviaPodORNamespace is a NP that admits peers matching EITHER the
// client-b pod selector OR the namespace-b selector: separate entries in the
// From list are OR'd together.
var AllowClientBviaPodORNamespace = &TruthTableEntry{
	OldDescription: "should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy]",
	Description:    "ALLOW PodSelector OR NamespaceSelector traffic to the server [Feature:NetworkPolicy]",
	Policy: &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name: "allow-ns-b-via-namespace-selector-or-client-b-via-pod-selector",
		},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: serverPodSelector,
			Ingress: []networkingv1.NetworkPolicyIngressRule{
				{
					// One rule with two peers; each peer is an independent OR'd
					// criterion. Fixed: NetworkPolicyPeer's selector fields are
					// *metav1.LabelSelector, so the shared selector values must be
					// passed by address (they were passed by value and did not
					// compile).
					From: []networkingv1.NetworkPolicyPeer{
						{
							PodSelector:       &clientBPodSelector,
							NamespaceSelector: nil,
						},
						{
							PodSelector:       nil,
							NamespaceSelector: &namespaceBSelector,
						},
					},
				},
			},
		},
	},
	Whitelist: map[string]bool{"BbAs": true, "AbAs": true, "BaAs": true},
}
// AllowAnyClientOutsideOfC is a NP that admits traffic from any pod in any
// namespace EXCEPT namespace C, via a NamespaceSelector NotIn match
// expression (see ingressRuleNameSelectorMatchExpressionNotC).
var AllowAnyClientOutsideOfC = &TruthTableEntry{
	OldDescription: "should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy]",
	Description:    "ALLOW any pod OUTSIDE of Namespace traffic to the server [Feature:NetworkPolicy]",
	Policy: &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			// Fixed: the name was copy-pasted from the ns-b/client-b entries,
			// described a different policy, and collided with theirs.
			Name: "allow-any-ns-except-ns-c-via-match-expressions",
		},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: serverPodSelector,
			Ingress: []networkingv1.NetworkPolicyIngressRule{
				ingressRuleNameSelectorMatchExpressionNotC,
			},
		},
	},
	// TODO(review): Whitelist is not populated for this entry although every
	// other entry declares one — confirm the expected allowed pairs and fill
	// it in before the truth-table harness consumes it.
}
var _ = SIGDescribe("NetworkPolicy2 [LinuxOnly]", func() {
var podServer *v1.Pod
var service *v1.Service
f := framework.NewDefaultFramework("network-policy")
// Create the namespaces used by all tests.
nsA := f.Namespace
nsBNamePtr := f.BaseName + "-b"
nsBName = &nsBNamePtr
nsB, err := f.CreateNamespace(*nsBName, map[string]string{
"ns-name": *nsBName,
})
nsCNamePtr := f.BaseName + "-c"
nsCName = &nsCNamePtr
nsC, err := f.CreateNamespace(*nsBName, map[string]string{
"ns-name": *nsCName,
})
nsC.DeepCopy() // just to force compiliation till we use this.
// Create the pods used by all tests
podServer, service = createServerPodAndService(f, f.Namespace, "server", []int{80, 81})
// now initialize
podServerLabelSelector = podServer.ObjectMeta.Labels["pod-name"]
ginkgo.BeforeEach(func() {
// Windows does not support network policies.
e2eskipper.SkipIfNodeOSDistroIs("windows")
})
ginkgo.Context("NetworkPolicy between server and client", func() {
ginkgo.BeforeSuite(func() {
// create nsA, nsB
// create pods a, b : in each framework, nsA, nsb
// verify that all pods can talk to all other pods.
})
ginkgo.AfterSuite(func() {
// delete nsA, nsB as they were created outside of the framework context.
})
ginkgo.It(DenyAll.Description, func() {
// Verify all connectivity
// create DenyAll.Policy
// verify all connectivity in whitelist
// complement the whitelist and verify disconnectivity
})
ginkgo.It(AllowAllInnerNamespace.Description, func() {
// Verify all connectivity
// Create AllowInnerNamespaceSelector.Policy
// verify all connectivity in whitelist
// complement the whitelist and verify disconnectivity
})
ginkgo.It(AllowNamespaceBViaNSSelector.Description, func() {
// Verify all connectivity
// Create AllowInnerNamespaceSelector.Policy
// verify all connectivity in whitelist
// complement the whitelist and verify disconnectivity
})
ginkgo.It(AllowClientAViaPodSelectorMatchingExpression.Description, func() {
// Verify all connectivity
// Create AllowInnerNamespaceSelector.Policy
// verify all connectivity in whitelist
// complement the whitelist and verify disconnectivity
})
ginkgo.It(AllowAnyClientOutsideOfC.Description, func() {
// Verify all connectivity
// Create AllowAnyClientOutsideOfC.Description.Policy
// verify all connectivity in whitelist
// complement the whitelist and verify disconnectivity
})
ginkgo.It(AllowClientBviaPodORNamespace.Description, func() {
// Verify all connectivity
// Create AllowInnerNamespaceSelector.Policy
// verify all connectivity in whitelist
// complement the whitelist and verify disconnectivity
})
ginkgo.It(AllowClientBviaPodANDNamespace.Description, func() {
// Verify all connectivity
// Create AllowInnerNamespaceSelector.Policy
// verify all connectivity in whitelist
// complement the whitelist and verify disconnectivity
})
ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() {
nsA := f.Namespace
nsBName := f.BaseName + "-b"
nsB, err := f.CreateNamespace(nsBName, map[string]string{
"ns-name": nsBName,
})
framework.ExpectNoError(err, "Error occurred while creating namespace-b.")
// Wait for Server in namespaces-a to be ready
framework.Logf("Waiting for server to come up.")
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServer)
framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Running.")
// Before application of the policy, all communication should be successful.
ginkgo.By("Creating client-a, in server's namespace, which should be able to contact the server.", func() {
testCanConnect(f, nsA, "client-a", service, 80)
})
ginkgo.By("Creating client-b, in server's namespace, which should be able to contact the server.", func() {
testCanConnect(f, nsA, "client-b", service, 80)
})
ginkgo.By("Creating client-a, not in server's namespace, which should be able to contact the server.", func() {
testCanConnect(f, nsB, "client-a", service, 80)
})
ginkgo.By("Creating client-b, not in server's namespace, which should be able to contact the server.", func() {
testCanConnect(f, nsB, "client-b", service, 80)
})
ginkgo.By("Creating a network policy for the server which allows traffic only from client-a in namespace-b.")
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Namespace: nsA.Name,
Name: "allow-ns-b-client-a-via-namespace-pod-selector",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply this policy to the Server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServerLabelSelector,
},
},
// Allow traffic only from client-a in namespace-b
Ingress: []networkingv1.NetworkPolicyIngressRule{{
From: []networkingv1.NetworkPolicyPeer{{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns-name": nsBName,
},
},
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": "client-a",
},
},
}},
}},
},
}
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
framework.ExpectNoError(err, "Error occurred while creating policy: policy.")
defer cleanupNetworkPolicy(f, policy)
ginkgo.By("Creating client-a, in server's namespace, which should not be able to contact the server.", func() {
testCannotConnect(f, nsA, "client-a", service, 80)
})
ginkgo.By("Creating client-b, in server's namespace, which should not be able to contact the server.", func() {
testCannotConnect(f, nsA, "client-b", service, 80)
}) // this is the positive test, for consistency, it should be at the end, rather then in the middle ?
ginkgo.By("Creating client-a, not in server's namespace, which should be able to contact the server.", func() {
testCanConnect(f, nsB, "client-a", service, 80)
})
ginkgo.By("Creating client-b, not in server's namespace, which should not be able to contact the server.", func() {
testCannotConnect(f, nsB, "client-b", service, 80)
})
})
ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() {
ginkgo.By("Creating a network policy for the Service which allows traffic only to one port.")
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-ingress-on-port-81",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply to server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServerLabelSelector,
},
},
// Allow traffic only to one port.
Ingress: []networkingv1.NetworkPolicyIngressRule{{
Ports: []networkingv1.NetworkPolicyPort{{
Port: &intstr.IntOrString{IntVal: 81},
}},
}},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.By("Testing pods can connect only to the port allowed by the policy.")
testCannotConnect(f, f.Namespace, "client-a", service, 80)
// also need to test that clients in *other* namespaces can connect to 81, as we do in other tests.
testCanConnect(f, f.Namespace, "client-b", service, 81)
})
ginkgo.It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() {
ginkgo.By("Creating a network policy for the Service which allows traffic only to one port.")
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-ingress-on-port-80",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply to server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServerLabelSelector,
},
},
// Allow traffic only to one port.
Ingress: []networkingv1.NetworkPolicyIngressRule{{
Ports: []networkingv1.NetworkPolicyPort{{
Port: &intstr.IntOrString{IntVal: 80},
}},
}},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.By("Creating a network policy for the Service which allows traffic only to another port.")
policy2 := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-ingress-on-port-81",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply to server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServerLabelSelector,
},
},
// Allow traffic only to one port.
Ingress: []networkingv1.NetworkPolicyIngressRule{{
Ports: []networkingv1.NetworkPolicyPort{{
Port: &intstr.IntOrString{IntVal: 81},
}},
}},
},
}
policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy2)
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy2)
ginkgo.By("Testing pods can connect to both ports when both policies are present.")
// also need to confirm the negative cases here
testCanConnect(f, f.Namespace, "client-a", service, 80)
testCanConnect(f, f.Namespace, "client-b", service, 81)
})
ginkgo.It("should support allow-all policy [Feature:NetworkPolicy]", func() {
ginkgo.By("Creating a network policy which allows all traffic.")
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-all",
},
Spec: networkingv1.NetworkPolicySpec{
// Allow all traffic
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{},
},
Ingress: []networkingv1.NetworkPolicyIngressRule{{}},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.By("Testing pods can connect to both ports when an 'allow-all' policy is present.")
// also need to confirm the negative cases here
// also need to confirm the positive namespaces here
testCanConnect(f, f.Namespace, "client-a", service, 80)
testCanConnect(f, f.Namespace, "client-b", service, 81)
})
ginkgo.It("should allow ingress access on one named port [Feature:NetworkPolicy]", func() {
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-client-a-via-named-port-ingress-rule",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply this policy to the Server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServerLabelSelector,
},
},
// Allow traffic to only one named port: "serve-80".
Ingress: []networkingv1.NetworkPolicyIngressRule{{
Ports: []networkingv1.NetworkPolicyPort{{
Port: &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80"},
}},
}},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
// we should specify that it can contact the server specifically on 80 here.
ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80)
})
ginkgo.By("Creating client-b which should not be able to contact the server on port 81.", func() {
testCannotConnect(f, f.Namespace, "client-b", service, 81)
})
})
// i think we should qualify this as 'exactly' vs 'one or more'
ginkgo.It("should allow ingress access from namespace on one named port [Feature:NetworkPolicy]", func() {
nsBName := f.BaseName + "-b"
nsB, err := f.CreateNamespace(nsBName, map[string]string{
"ns-name": nsBName,
})
framework.ExpectNoError(err, "Error creating namespace %v: %v", nsBName, err)
const allowedPort = 80
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-client-in-ns-b-via-named-port-ingress-rule",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply this policy to the Server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServerLabelSelector,
},
},
// Allow traffic to only one named port: "serve-80" from namespace-b.
Ingress: []networkingv1.NetworkPolicyIngressRule{{
From: []networkingv1.NetworkPolicyPeer{{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns-name": nsBName,
},
},
}},
Ports: []networkingv1.NetworkPolicyPort{{
Port: &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80"},
}},
}},
},
}
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
// use client-b for a simple delta here
testCannotConnect(f, f.Namespace, "client-a", service, allowedPort)
testCanConnect(f, nsB, "client-b", service, allowedPort)
})
// we dont specify the namedport/dns dependency explicitly here, i think we should.
ginkgo.It("should allow egress access on one named port [Feature:NetworkPolicy]", func() {
clientPodName := "client-a"
protocolUDP := v1.ProtocolUDP
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-client-a-via-named-port-egress-rule",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply this policy to client-a
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": clientPodName,
},
},
// Allow traffic to only one named port: "serve-80".
Egress: []networkingv1.NetworkPolicyEgressRule{{
Ports: []networkingv1.NetworkPolicyPort{
{
Port: &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80"},
},
// Allow DNS look-ups
{
Protocol: &protocolUDP,
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53},
},
},
}},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
// mention dns lookup here
// also we should test an external namespace, since that is done in other tests.
ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, clientPodName, service, 80)
})
ginkgo.By("Creating client-a which should not be able to contact the server on port 81.", func() {
testCannotConnect(f, f.Namespace, clientPodName, service, 81)
})
})
ginkgo.It("should enforce updated policy [Feature:NetworkPolicy]", func() {
const (
clientAAllowedPort = 80
clientANotAllowedPort = 81
)
// should we have one testCanConnect up here, so that the Cannot connect clearely represents a delta?
// i wonder wether the tests here should be exported as YAML and placed in kubernetes/examples somehow...
ginkgo.By("Creating a network policy for the Service which allows traffic from pod at a port")
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-ingress",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply to server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServerLabelSelector,
},
},
// Allow traffic only to one port.
Ingress: []networkingv1.NetworkPolicyIngressRule{{
From: []networkingv1.NetworkPolicyPeer{{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": "client-a",
},
},
}},
Ports: []networkingv1.NetworkPolicyPort{{
Port: &intstr.IntOrString{IntVal: clientAAllowedPort},
}},
}},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
testCanConnect(f, f.Namespace, "client-a", service, clientAAllowedPort)
err = f.WaitForPodNotFound("client-a", framework.PodDeleteTimeout)
framework.ExpectNoError(err, "Expected pod to be not found.")
testCannotConnect(f, f.Namespace, "client-b", service, clientAAllowedPort)
err = f.WaitForPodNotFound("client-b", framework.PodDeleteTimeout)
framework.ExpectNoError(err, "Expected pod to be not found.")
testCannotConnect(f, f.Namespace, "client-a", service, clientANotAllowedPort)
err = f.WaitForPodNotFound("client-a", framework.PodDeleteTimeout)
framework.ExpectNoError(err, "Expected pod to be not found.")
const (
clientBAllowedPort = 81
clientBNotAllowedPort = 80
)
ginkgo.By("Updating a network policy for the Service which allows traffic from another pod at another port.")
policy = &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-ingress",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply to server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServerLabelSelector,
},
},
// Allow traffic only to one port.
Ingress: []networkingv1.NetworkPolicyIngressRule{{
From: []networkingv1.NetworkPolicyPeer{{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": "client-b",
},
},
}},
Ports: []networkingv1.NetworkPolicyPort{{
Port: &intstr.IntOrString{IntVal: clientBAllowedPort},
}},
}},
},
}
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Update(policy)
framework.ExpectNoError(err, "Error updating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
testCannotConnect(f, f.Namespace, "client-b", service, clientBNotAllowedPort)
err = f.WaitForPodNotFound("client-b", framework.PodDeleteTimeout)
framework.ExpectNoError(err, "Expected pod to be not found.")
testCannotConnect(f, f.Namespace, "client-a", service, clientBNotAllowedPort)
testCanConnect(f, f.Namespace, "client-b", service, clientBAllowedPort)
})
ginkgo.It("should allow ingress access from updated namespace [Feature:NetworkPolicy]", func() {
nsA := f.Namespace
nsBName := f.BaseName + "-b"
newNsBName := nsBName + "-updated"
nsB, err := f.CreateNamespace(nsBName, map[string]string{
"ns-name": nsBName,
})
framework.ExpectNoError(err, "Error creating namespace %v: %v", nsBName, err)
const allowedPort = 80
// Create Policy for that service that allows traffic only via namespace B
ginkgo.By("Creating a network policy for the server which allows traffic from namespace-b.")
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-ns-b-via-namespace-selector",
},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServerLabelSelector,
},
},
Ingress: []networkingv1.NetworkPolicyIngressRule{{
From: []networkingv1.NetworkPolicyPeer{{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns-name": newNsBName,
},
},
}},
}},
},
}
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(policy)
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
// #t1 will reference this later.
testCannotConnect(f, nsB, "client-a", service, allowedPort)
nsB, err = f.ClientSet.CoreV1().Namespaces().Get(nsB.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Error getting Namespace %v: %v", nsB.ObjectMeta.Name, err)
nsB.ObjectMeta.Labels["ns-name"] = newNsBName
nsB, err = f.ClientSet.CoreV1().Namespaces().Update(nsB)
framework.ExpectNoError(err, "Error updating Namespace %v: %v", nsB.ObjectMeta.Name, err)
// for testCanConnect to be meaningful, it should be identical to #t1 above
// also, we probably should be verifying here that other namespaces can still *not* connect.
testCanConnect(f, nsB, "client-b", service, allowedPort)
})
ginkgo.It("should allow ingress access from updated pod [Feature:NetworkPolicy]", func() {
const allowedPort = 80
ginkgo.By("Creating a network policy for the server which allows traffic from client-a-updated.")
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-pod-b-via-pod-selector",
},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServerLabelSelector,
},
},
Ingress: []networkingv1.NetworkPolicyIngressRule{{
From: []networkingv1.NetworkPolicyPeer{{
PodSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{
Key: "pod-name",
Operator: metav1.LabelSelectorOpDoesNotExist,
}},
},
}},
}},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", "client-a", service.Name))
podClient := createNetworkClientPod(f, f.Namespace, "client-a", service, allowedPort)
defer func() {
ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name))
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podClient.Name, nil); err != nil {
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
}
}()
checkNoConnectivity(f, f.Namespace, podClient, service)
ginkgo.By(fmt.Sprintf("Updating client pod %s that should successfully connect to %s.", podClient.Name, service.Name))
podClient = updateNetworkClientPodLabel(f, f.Namespace, podClient.Name, "replace", "/metadata/labels", map[string]string{})
checkConnectivity(f, f.Namespace, podClient, service)
})
// we should say "matching BOTH podSelector AND Namespaceselector"
ginkgo.It("should enforce egress policy allowing traffic to a server in a different namespace based PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() {
var nsBserviceA, nsBserviceB *v1.Service
var nsBpodServerA, nsBpodServerB *v1.Pod
nsA := f.Namespace
nsBName := f.BaseName + "-b"
nsB, err := f.CreateNamespace(nsBName, map[string]string{
"ns-name": nsBName,
})
framework.ExpectNoError(err, "Error occurred while creating namespace-b.")
// Creating pods and services in namespace-b
//call these services "foo" and "bar" to differentiate namespace titles "a/b", i.e. nsBpodFoo, nsBpodBar
nsBpodServerA, nsBserviceA = createServerPodAndService(f, nsB, "ns-b-server-a", []int{80})
nsBpodServerB, nsBserviceB = createServerPodAndService(f, nsB, "ns-b-server-b", []int{80})
// Wait for Server with Service in NS-A to be ready
framework.Logf("Waiting for servers to come up.")
// shouldnt this be part of the Giinkgo.Before() ? podServer is part of the bootstrapped setup in BeforeEach()
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServer)
framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Running.")
// Wait for Servers with Services in NS-B to be ready
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, nsBpodServerA)
framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Running.")
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, nsBpodServerB)
framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Running.")
defer cleanupServerPodAndService(f, nsBpodServerA, nsBserviceA)
defer cleanupServerPodAndService(f, nsBpodServerB, nsBserviceB)
ginkgo.By("Creating a network policy for the server which allows traffic only to a server in different namespace.")
protocolUDP := v1.ProtocolUDP
policyAllowToServerInNSB := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Namespace: nsA.Name,
Name: "allow-to-ns-b-server-a-via-namespace-selector",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply this policy to the client
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": "client-a",
},
},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
// Allow traffic only to server-a in namespace-b
Egress: []networkingv1.NetworkPolicyEgressRule{
{
Ports: []networkingv1.NetworkPolicyPort{
// Allow DNS look-ups
{
Protocol: &protocolUDP,
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53},
},
},
},
{
To: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns-name": nsBName,
},
},
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": nsBpodServerA.ObjectMeta.Labels["pod-name"],
},
},
},
},
},
},
},
}
policyAllowToServerInNSB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyAllowToServerInNSB)
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToServerInNSB.")
defer cleanupNetworkPolicy(f, policyAllowToServerInNSB)
// minor:
// Mentioned this earlier, but i think maybe we want to test both ports
// Also, i think foo and bar make more sense for these pod servers.
ginkgo.By("Creating client-a, in 'namespace-a', which should be able to contact the server-a in namespace-b.", func() {
testCanConnect(f, nsA, "client-a", nsBserviceA, 80)
})
ginkgo.By("Creating client-a, in 'namespace-a', which should not be able to contact the server-b in namespace-b.", func() {
testCannotConnect(f, nsA, "client-a", nsBserviceB, 80)
})
ginkgo.By("Creating client-a, in 'namespace-a', which should not be able to contact the server in namespace-a.", func() {
testCannotConnect(f, nsA, "client-a", service, 80)
})
})
// Verifies that multiple ingress policies selecting the same server pod are
// additive: the narrow "allow only from client-b" policy alone rejects
// client-a, but once an allow-all ingress policy is added, the union of the
// two policies admits both clients.
ginkgo.It("should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]", func() {
	ginkgo.By("Creating a network policy for the server which allows traffic only from client-b.")
	policyAllowOnlyFromClientB := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.Namespace.Name,
			Name:      "allow-from-client-b-pod-selector",
		},
		Spec: networkingv1.NetworkPolicySpec{
			// Apply this policy to the Server
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"pod-name": podServerLabelSelector,
				},
			},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
			// Allow traffic only from "client-b"
			Ingress: []networkingv1.NetworkPolicyIngressRule{{
				From: []networkingv1.NetworkPolicyPeer{{
					PodSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{
							"pod-name": "client-b",
						},
					},
				}},
			}},
		},
	}
	policyAllowOnlyFromClientB, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyAllowOnlyFromClientB)
	framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyFromClientB.")
	defer cleanupNetworkPolicy(f, policyAllowOnlyFromClientB)
	ginkgo.By("Creating client-a which should not be able to contact the server.", func() {
		testCannotConnect(f, f.Namespace, "client-a", service, 80)
	})
	ginkgo.By("Creating client-b which should be able to contact the server.", func() {
		testCanConnect(f, f.Namespace, "client-b", service, 80)
	})
	ginkgo.By("Creating a network policy for the server which allows traffic from all clients.")
	policyIngressAllowAll := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			// Set the namespace explicitly, consistent with the other
			// policies created in this suite.
			Namespace: f.Namespace.Name,
			Name:      "allow-all",
		},
		Spec: networkingv1.NetworkPolicySpec{
			// An empty pod selector matches every pod in the namespace.
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{},
			},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
			// A single empty ingress rule allows traffic from all sources.
			Ingress: []networkingv1.NetworkPolicyIngressRule{{}},
		},
	}
	policyIngressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyIngressAllowAll)
	framework.ExpectNoError(err, "Error occurred while creating policy: policyIngressAllowAll.")
	defer cleanupNetworkPolicy(f, policyIngressAllowAll)
	ginkgo.By("Creating client-a which should be able to contact the server.", func() {
		testCanConnect(f, f.Namespace, "client-a", service, 80)
	})
	ginkgo.By("Creating client-b which should be able to contact the server.", func() {
		testCanConnect(f, f.Namespace, "client-b", service, 80)
	})
})
// Verifies that multiple egress policies selecting the same client pod are
// additive: the narrow "allow only to server-a" policy alone blocks traffic
// to server-b, but once an allow-all egress policy is added, the union of
// the two policies admits both destinations.
ginkgo.It("should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]", func() {
	podServerB, serviceB := createServerPodAndService(f, f.Namespace, "server-b", []int{80})
	defer cleanupServerPodAndService(f, podServerB, serviceB)
	ginkgo.By("Waiting for pod ready", func() {
		err := f.WaitForPodReady(podServerB.Name)
		framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.")
	})
	protocolUDP := v1.ProtocolUDP
	ginkgo.By("Creating client-a which should be able to contact the server before applying policy.", func() {
		testCanConnect(f, f.Namespace, "client-a", serviceB, 80)
	})
	ginkgo.By("Creating a network policy for the server which allows traffic only to server-a.")
	policyAllowOnlyToServerA := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.Namespace.Name,
			Name:      "allow-to-server-a-pod-selector",
		},
		Spec: networkingv1.NetworkPolicySpec{
			// Apply this policy to the "client-a"
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"pod-name": "client-a",
				},
			},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
			// Allow traffic only to "server-a"
			Egress: []networkingv1.NetworkPolicyEgressRule{
				{
					Ports: []networkingv1.NetworkPolicyPort{
						// Allow DNS look-ups so the client can resolve the
						// service name before connecting.
						{
							Protocol: &protocolUDP,
							Port:     &intstr.IntOrString{Type: intstr.Int, IntVal: 53},
						},
					},
				},
				{
					To: []networkingv1.NetworkPolicyPeer{
						{
							PodSelector: &metav1.LabelSelector{
								MatchLabels: map[string]string{
									"pod-name": podServerLabelSelector,
								},
							},
						},
					},
				},
			},
		},
	}
	policyAllowOnlyToServerA, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyAllowOnlyToServerA)
	framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyToServerA.")
	defer cleanupNetworkPolicy(f, policyAllowOnlyToServerA)
	ginkgo.By("Creating client-a which should not be able to contact the server-b.", func() {
		testCannotConnect(f, f.Namespace, "client-a", serviceB, 80)
	})
	ginkgo.By("Creating client-a which should be able to contact the server.", func() {
		testCanConnect(f, f.Namespace, "client-a", service, 80)
	})
	ginkgo.By("Creating a network policy which allows traffic to all pods.")
	policyEgressAllowAll := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			// Set the namespace explicitly, consistent with the other
			// policies created in this suite.
			Namespace: f.Namespace.Name,
			Name:      "allow-all",
		},
		Spec: networkingv1.NetworkPolicySpec{
			// An empty pod selector matches every pod in the namespace.
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{},
			},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
			// A single empty egress rule allows traffic to all destinations.
			Egress: []networkingv1.NetworkPolicyEgressRule{{}},
		},
	}
	policyEgressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyEgressAllowAll)
	framework.ExpectNoError(err, "Error occurred while creating policy: policyEgressAllowAll.")
	defer cleanupNetworkPolicy(f, policyEgressAllowAll)
	ginkgo.By("Creating client-a which should be able to contact the server-b.", func() {
		testCanConnect(f, f.Namespace, "client-a", serviceB, 80)
	})
	ginkgo.By("Creating client-a which should be able to contact the server-a.", func() {
		testCanConnect(f, f.Namespace, "client-a", service, 80)
	})
})
// Verifies that a network policy stops being enforced once it is deleted:
// a deny-all policy blocks client-a, a narrower allow policy re-admits it,
// deleting the allow policy blocks it again, and deleting the deny-all
// policy finally restores connectivity. No defers here: the deletions are
// the behavior under test.
ginkgo.It("should stop enforcing policies after they are deleted [Feature:NetworkPolicy]", func() {
	ginkgo.By("Creating a network policy for the server which denies all traffic.")
	denyAllPolicy := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.Namespace.Name,
			Name:      "deny-all",
		},
		Spec: networkingv1.NetworkPolicySpec{
			// An empty pod selector with no ingress rules denies all inbound
			// traffic to every pod in the namespace.
			PodSelector: metav1.LabelSelector{},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
			Ingress:     []networkingv1.NetworkPolicyIngressRule{},
		},
	}
	denyAllPolicy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(denyAllPolicy)
	framework.ExpectNoError(err, "Error occurred while creating policy: policyDenyAll.")
	ginkgo.By("Creating client-a which should not be able to contact the server.", func() {
		testCannotConnect(f, f.Namespace, "client-a", service, 80)
	})
	ginkgo.By("Creating a network policy for the server which allows traffic only from client-a.")
	allowFromClientAPolicy := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.Namespace.Name,
			Name:      "allow-from-client-a-pod-selector",
		},
		Spec: networkingv1.NetworkPolicySpec{
			// This policy selects the server pod.
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{"pod-name": podServerLabelSelector},
			},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
			// Permit inbound traffic from "client-a" only.
			Ingress: []networkingv1.NetworkPolicyIngressRule{{
				From: []networkingv1.NetworkPolicyPeer{{
					PodSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"pod-name": "client-a"},
					},
				}},
			}},
		},
	}
	allowFromClientAPolicy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(allowFromClientAPolicy)
	framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowFromClientA.")
	ginkgo.By("Creating client-a which should be able to contact the server.", func() {
		testCanConnect(f, f.Namespace, "client-a", service, 80)
	})
	ginkgo.By("Deleting the network policy allowing traffic from client-a")
	cleanupNetworkPolicy(f, allowFromClientAPolicy)
	ginkgo.By("Creating client-a which should not be able to contact the server.", func() {
		testCannotConnect(f, f.Namespace, "client-a", service, 80)
	})
	ginkgo.By("Deleting the network policy denying all traffic.")
	cleanupNetworkPolicy(f, denyAllPolicy)
	ginkgo.By("Creating client-a which should be able to contact the server.", func() {
		testCanConnect(f, f.Namespace, "client-a", service, 80)
	})
})
// Verifies that an egress ipBlock rule restricts a client to exactly one
// server: only the single-host CIDR covering podServer's IP is allowed, so
// server-b must become unreachable once the policy is applied. The host mask
// is chosen per IP family (/32 for IPv4, /128 for IPv6) so the test also
// works on IPv6 clusters.
ginkgo.It("should allow egress access to server in CIDR block [Feature:NetworkPolicy]", func() {
	var serviceB *v1.Service
	var podServerB *v1.Pod
	protocolUDP := v1.ProtocolUDP
	// Getting podServer's status to get podServer's IP, to create the CIDR
	podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(podServer.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "Error occurred while getting pod status.")
	// Build a single-host CIDR: /32 for IPv4, /128 for IPv6. An IPv6
	// address always contains a ':' while an IPv4 dotted quad never does.
	hostMask := 32
	for _, r := range podServerStatus.Status.PodIP {
		if r == ':' {
			hostMask = 128
			break
		}
	}
	podServerCIDR := fmt.Sprintf("%s/%d", podServerStatus.Status.PodIP, hostMask)
	// Creating pod-b and service-b
	podServerB, serviceB = createServerPodAndService(f, f.Namespace, "pod-b", []int{80})
	ginkgo.By("Waiting for pod-b to be ready", func() {
		err = f.WaitForPodReady(podServerB.Name)
		framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.")
	})
	defer cleanupServerPodAndService(f, podServerB, serviceB)
	// Wait for podServerB with serviceB to be ready
	err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServerB)
	framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Running.")
	ginkgo.By("Creating client-a which should be able to contact the server-b.", func() {
		testCanConnect(f, f.Namespace, "client-a", serviceB, 80)
	})
	policyAllowCIDR := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.Namespace.Name,
			Name:      "allow-client-a-via-cidr-egress-rule",
		},
		Spec: networkingv1.NetworkPolicySpec{
			// Apply this policy to client-a, the pod whose egress is restricted.
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"pod-name": "client-a",
				},
			},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
			// Allow traffic to only one CIDR block.
			Egress: []networkingv1.NetworkPolicyEgressRule{
				{
					Ports: []networkingv1.NetworkPolicyPort{
						// Allow DNS look-ups
						{
							Protocol: &protocolUDP,
							Port:     &intstr.IntOrString{Type: intstr.Int, IntVal: 53},
						},
					},
				},
				{
					To: []networkingv1.NetworkPolicyPeer{
						{
							IPBlock: &networkingv1.IPBlock{
								CIDR: podServerCIDR,
							},
						},
					},
				},
			},
		},
	}
	policyAllowCIDR, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyAllowCIDR)
	framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDR.")
	defer cleanupNetworkPolicy(f, policyAllowCIDR)
	// NOTE(review): for parity with other tests, consider additional
	// cannot-connect verifications here.
	ginkgo.By("Creating client-a which should not be able to contact the server-b.", func() {
		testCannotConnect(f, f.Namespace, "client-a", serviceB, 80)
	})
	ginkgo.By("Creating client-a which should be able to contact the server.", func() {
		testCanConnect(f, f.Namespace, "client-a", service, 80)
	})
})
// Verifies that ingress and egress policies applied to the same PodSelector
// are enforced independently: pod-a's egress policy (allow only to pod-b)
// and pod-a's ingress policy (deny from everyone) each take effect without
// interfering with the other.
// TODO(review): add a companion test that enforces multiple policies via a
// NamespaceSelector as well.
ginkgo.It("should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]", func() {
	var serviceA, serviceB *v1.Service
	var podA, podB *v1.Pod
	var err error
	protocolUDP := v1.ProtocolUDP
	// Before applying policy, communication should be successful between pod-a and pod-b
	podA, serviceA = createServerPodAndService(f, f.Namespace, "pod-a", []int{80})
	ginkgo.By("Waiting for pod-a to be ready", func() {
		err = f.WaitForPodReady(podA.Name)
		framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.")
	})
	ginkgo.By("Creating client pod-b which should be able to contact the server pod-a.", func() {
		testCanConnect(f, f.Namespace, "pod-b", serviceA, 80)
	})
	// Tear down the pod-a server so its name can be reused as a client below.
	cleanupServerPodAndService(f, podA, serviceA)
	podB, serviceB = createServerPodAndService(f, f.Namespace, "pod-b", []int{80})
	ginkgo.By("Waiting for pod-b to be ready", func() {
		err = f.WaitForPodReady(podB.Name)
		framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.")
	})
	ginkgo.By("Creating client pod-a which should be able to contact the server pod-b.", func() {
		testCanConnect(f, f.Namespace, "pod-a", serviceB, 80)
	})
	ginkgo.By("Creating a network policy for pod-a which allows Egress traffic to pod-b.")
	policyAllowToPodB := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.Namespace.Name,
			Name:      "allow-pod-a-to-pod-b-using-pod-selector",
		},
		Spec: networkingv1.NetworkPolicySpec{
			// Apply this policy on pod-a
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"pod-name": "pod-a",
				},
			},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
			// Allow traffic to server on pod-b
			Egress: []networkingv1.NetworkPolicyEgressRule{
				{
					Ports: []networkingv1.NetworkPolicyPort{
						// Allow DNS look-ups
						{
							Protocol: &protocolUDP,
							Port:     &intstr.IntOrString{Type: intstr.Int, IntVal: 53},
						},
					},
				},
				{
					To: []networkingv1.NetworkPolicyPeer{
						{
							PodSelector: &metav1.LabelSelector{
								MatchLabels: map[string]string{
									"pod-name": "pod-b",
								},
							},
						},
					},
				},
			},
		},
	}
	policyAllowToPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyAllowToPodB)
	framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToPodB.")
	defer cleanupNetworkPolicy(f, policyAllowToPodB)
	ginkgo.By("Creating a network policy for pod-a that denies traffic from pod-b.")
	policyDenyFromPodB := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.Namespace.Name,
			Name:      "deny-pod-b-to-pod-a-pod-selector",
		},
		Spec: networkingv1.NetworkPolicySpec{
			// Apply this policy on the server on pod-a
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"pod-name": "pod-a",
				},
			},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
			// Deny traffic from all pods, including pod-b
			Ingress: []networkingv1.NetworkPolicyIngressRule{},
		},
	}
	policyDenyFromPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyDenyFromPodB)
	framework.ExpectNoError(err, "Error occurred while creating policy: policyDenyFromPodB.")
	defer cleanupNetworkPolicy(f, policyDenyFromPodB)
	// pod-a's ingress-deny policy must not affect pod-a's own outbound traffic.
	ginkgo.By("Creating client pod-a which should be able to contact the server pod-b.", func() {
		testCanConnect(f, f.Namespace, "pod-a", serviceB, 80)
	})
	cleanupServerPodAndService(f, podB, serviceB)
	// Creating server pod with label "pod-name": "pod-a" to deny traffic from client pod with label "pod-name": "pod-b"
	podA, serviceA = createServerPodAndService(f, f.Namespace, "pod-a", []int{80})
	ginkgo.By("Waiting for pod-a to be ready", func() {
		err = f.WaitForPodReady(podA.Name)
		framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.")
	})
	// Fixed: the step description previously said "should be able" while the
	// assertion (correctly, given the ingress-deny policy on pod-a) is that
	// the connection must fail.
	ginkgo.By("Creating client pod-b which should not be able to contact the server pod-a.", func() {
		testCannotConnect(f, f.Namespace, "pod-b", serviceA, 80)
	})
	cleanupServerPodAndService(f, podA, serviceA)
})
})
})
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment