Create namespace
$ kubectl create ns policy-demo
Disallow ingress by default (DefaultDeny isolation via namespace annotation)
$ kubectl annotate ns policy-demo "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
Restrict egress to the namespace (deny traffic leaving policy-demo)
# Deny egress from pods in the policy-demo namespace to any destination
# outside that namespace. Order 500 leaves room for lower-order (higher
# priority) policies to allow specific traffic first.
apiVersion: "alpha.projectcalico.org/v1"
kind: SystemNetworkPolicy
metadata:
  name: ns-egress
  # SystemNetworkPolicy resources live in kube-system regardless of the
  # namespace they select.
  namespace: kube-system
spec:
  selector: calico/k8s_ns == 'policy-demo'
  order: 500
  egress:
    - action: deny
      destination:
        notSelector: calico/k8s_ns == 'policy-demo'
Allow ingress to pods with label nginx from pods with label access
# Allow TCP/80 ingress to pods labeled run=nginx in policy-demo, but only
# from pods labeled run=access in the same namespace. Order 0 gives this
# rule priority over the order-500 namespace egress deny above.
apiVersion: "alpha.projectcalico.org/v1"
kind: SystemNetworkPolicy
metadata:
  name: policy-demo-app-class
  namespace: kube-system
spec:
  order: 0
  selector: calico/k8s_ns == 'policy-demo'
  ingress:
    - action: allow
      protocol: tcp
      source:
        selector: calico/k8s_ns == 'policy-demo' && run == 'access'
      destination:
        selector: calico/k8s_ns == 'policy-demo' && run == 'nginx'
        ports: [80]
Run nginx
kubectl run --namespace=policy-demo nginx --replicas=2 --image=nginx &&\
kubectl expose --namespace=policy-demo deployment nginx --port=80
Run busybox
kubectl run --namespace=policy-demo access --rm -ti --image busybox /bin/sh
Call the cluster-ip of the nginx service, observe it times out.
# Calico Kubernetes Datastore Hosted Install
# Calico policy-only with user-supplied networking
# http://docs.projectcalico.org/v2.2/getting-started/kubernetes/installation/hosted/kubernetes-datastore/
#
# This ConfigMap is used to configure a self-hosted Calico installation.
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-calico-cfg
  namespace: kube-system
data:
  # The CNI network configuration to install on each node.
  # http://docs.projectcalico.org/v2.2/reference/cni-plugin/configuration
  # https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration
  # depends on flannel to perform networking
  # __KUBERNETES_NODE_NAME__, __SERVICEACCOUNT_TOKEN__ and
  # __KUBECONFIG_FILEPATH__ are substituted by install-cni.sh at runtime.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.0",
      "type": "calico",
      "log_level": "WARNING",
      "datastore_type": "kubernetes",
      "nodename": "__KUBERNETES_NODE_NAME__",
      "ipam": {
        "type": "host-local",
        "subnet": "usePodCidr"
      },
      "policy": {
        "type": "k8s",
        "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
      },
      "kubernetes": {
        "k8s_api_root": "https://calico-api.josh-test-dns.com:443",
        "kubeconfig": "__KUBECONFIG_FILEPATH__"
      }
    }
---
# RBAC permissions needed by calico/node and the Calico CNI plugin when
# using the Kubernetes API as the datastore.
# NOTE(review): ClusterRole is cluster-scoped; the original manifest carried a
# meaningless `namespace: kube-system` here, which has been dropped.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: kube-calico
rules:
  - apiGroups: [""]
    resources:
      - namespaces
    verbs:
      - get
      - list
      - watch
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - update
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
      - list
      - watch
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - update
      - watch
  - apiGroups: ["extensions"]
    resources:
      - thirdpartyresources
    verbs:
      - create
      - get
      - list
      - watch
  - apiGroups: ["extensions"]
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
  - apiGroups: ["projectcalico.org"]
    resources:
      - globalconfigs
    verbs:
      - create
      - get
      - list
      - update
      - watch
  - apiGroups: ["projectcalico.org"]
    resources:
      - ippools
    verbs:
      - create
      - delete
      - get
      - list
      - update
      - watch
  - apiGroups: ["alpha.projectcalico.org"]
    resources:
      - systemnetworkpolicies
    verbs:
      - get
      - list
      - watch
---
# Bind the kube-calico ClusterRole to the kube-calico ServiceAccount
# used by the DaemonSet pods below.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kube-calico
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-calico
subjects:
  - kind: ServiceAccount
    name: kube-calico
    namespace: kube-system
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-calico
  namespace: kube-system
  labels:
    k8s-app: kube-calico
spec:
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  selector:
    matchLabels:
      k8s-app: kube-calico
  template:
    metadata:
      labels:
        k8s-app: kube-calico
      annotations:
        # This, along with the CriticalAddonsOnly toleration below,
        # marks the pod as a critical add-on, ensuring it gets
        # priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # Runs in the host network namespace so Calico can program the
      # node's dataplane directly.
      hostNetwork: true
      serviceAccountName: kube-calico
      tolerations:
        # Schedule onto masters as well as workers.
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy
        - name: kube-calico
          image: quay.io/calico/node:v1.3.0
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Enable felix info logging.
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            # Don't enable BGP.
            - name: CALICO_NETWORKING_BACKEND
              value: "none"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPV6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
            # The Calico IPv4 pool to use. This should match `--cluster-cidr`
            - name: CALICO_IPV4POOL_CIDR
              value: "10.2.0.0/16"
            # Enable IPIP
            # NOTE(review): with CALICO_NETWORKING_BACKEND=none (policy-only),
            # the IPIP setting is presumably inert — confirm against the
            # Calico v2.2 node configuration reference.
            - name: CALICO_IPV4POOL_IPIP
              value: "always"
            # Set based on the k8s node name.
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # No IP address needed.
            - name: IP
              value: ""
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          volumeMounts:
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: quay.io/calico/cni:v1.9.1-4-g23fcd5f
          command: ["/install-cni.sh"]
          env:
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: kube-calico-cfg
                  key: cni_network_config
            # Where the kubelet reads CNI config on the host; the matching
            # hostPath volume below mounts this same directory.
            - name: CNI_NET_DIR
              value: "/etc/kubernetes/cni/net.d"
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      volumes:
        # Used by calico/node for runtime state.
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        # Used by install-cni to drop the CNI binaries on the host.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        # Used by install-cni to write the CNI network config; note the host
        # path is the kubelet's /etc/kubernetes/cni/net.d even though the
        # in-container mount point is /host/etc/cni/net.d.
        - name: cni-net-dir
          hostPath:
            path: /etc/kubernetes/cni/net.d
---
# ServiceAccount under which the kube-calico DaemonSet pods run;
# granted permissions via the kube-calico ClusterRoleBinding.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-calico
  namespace: kube-system