Skip to content

Instantly share code, notes, and snippets.

@nicolas-g
Last active April 19, 2018 21:35
Show Gist options
  • Save nicolas-g/db571d150896fd9a6a82bcf2d7280104 to your computer and use it in GitHub Desktop.
Save nicolas-g/db571d150896fd9a6a82bcf2d7280104 to your computer and use it in GitHub Desktop.
Kubernetes Yaml files
# NG: This was taken from https://j.hept.io/contour-deployment-rbac
# or https://raw.githubusercontent.com/heptio/contour/master/deployment/render/deployment-rbac.yaml
# following the tutorial https://blog.heptio.com/how-to-deploy-web-applications-on-kubernetes-with-heptio-contour-and-lets-encrypt-d58efbad9f56
#
#
# This file is generated from the individual yaml files by deployment/render.sh.
# Do not edit this file directly but instead edit the source files and
# re-render.
---
# Namespace holding all Contour components (deployment, service account, service).
apiVersion: v1
kind: Namespace
metadata:
  name: heptio-contour
---
# Identity the Contour pods run as; bound to the "contour" ClusterRole below.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: contour
  namespace: heptio-contour
---
# Contour deployment: each pod pairs an Envoy proxy container with a Contour
# sidecar that serves Envoy's xDS config; an init container bootstraps the
# shared /config/contour.yaml before Envoy starts.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: contour
  name: contour
  namespace: heptio-contour
spec:
  selector:
    matchLabels:
      app: contour
  replicas: 2
  template:
    metadata:
      labels:
        app: contour
      annotations:
        # Scrape Envoy's admin /stats endpoint in Prometheus format.
        prometheus.io/scrape: "true"
        prometheus.io/port: "9001"
        prometheus.io/path: "/stats"
        prometheus.io/format: "prometheus"
    spec:
      containers:
        - image: docker.io/envoyproxy/envoy-alpine:v1.6.0
          name: envoy
          ports:
            - containerPort: 8080
              name: http
            - containerPort: 8443
              name: https
          command: ["envoy"]
          args: ["-c", "/config/contour.yaml", "--service-cluster", "cluster0", "--service-node", "node0", "-l", "info", "--v2-config-only"]
          volumeMounts:
            - name: contour-config
              mountPath: /config
        - image: gcr.io/heptio-images/contour:master
          imagePullPolicy: Always
          name: contour
          command: ["contour"]
          args: ["serve", "--incluster"]
      initContainers:
        # Writes the Envoy bootstrap config consumed by the envoy container above.
        - image: gcr.io/heptio-images/contour:master
          imagePullPolicy: Always
          name: envoy-initconfig
          command: ["contour"]
          args: ["bootstrap", "/config/contour.yaml"]
          volumeMounts:
            - name: contour-config
              mountPath: /config
      volumes:
        - name: contour-config
          emptyDir: {}
      dnsPolicy: ClusterFirst
      serviceAccountName: contour
      terminationGracePeriodSeconds: 30
      # The affinity stanza below tells Kubernetes to try hard not to place 2 of
      # these pods on the same node.
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app: contour
                topologyKey: kubernetes.io/hostname
---
# Grants the contour ServiceAccount the "contour" ClusterRole defined below.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: contour
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: contour
subjects:
  - kind: ServiceAccount
    name: contour
    namespace: heptio-contour
---
# Read-only permissions Contour needs to watch cluster state and ingresses.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: contour
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
# External entry point: a LoadBalancer Service fronting the Envoy pods.
apiVersion: v1
kind: Service
metadata:
  name: contour
  namespace: heptio-contour
  annotations:
    # This annotation puts the AWS ELB into "TCP" mode so that it does not
    # do HTTP negotiation for HTTPS connections at the ELB edge.
    # The downside of this is the remote IP address of all connections will
    # appear to be the internal address of the ELB. See docs/proxy-proto.md
    # for information about enabling the PROXY protocol on the ELB to recover
    # the original remote IP address.
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
spec:
  ports:
    - port: 80
      name: http
      protocol: TCP
      targetPort: 8080
    - port: 443
      name: https
      protocol: TCP
      targetPort: 8443
  selector:
    app: contour
  type: LoadBalancer
---
# cert-manager Certificate: obtains a Let's Encrypt cert for the dashboard
# hostname via HTTP-01 and stores it in the dashboard-proxy-tls secret,
# which the Ingress below references for TLS.
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: dashboard-proxy-tls
  namespace: kube-system
spec:
  secretName: dashboard-proxy-tls
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
  commonName: k8s.i.example.com
  dnsNames:
    - k8s.i.example.com
  acme:
    config:
      - http01: {}
        domains:
          - k8s.i.example.com
---
# oauth2_proxy in front of the Kubernetes dashboard: authenticates users
# against a GitHub org before proxying to the dashboard service.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: dashboard-proxy
  name: dashboard-proxy
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: dashboard-proxy
  template:
    metadata:
      labels:
        app: dashboard-proxy
    spec:
      containers:
        - args:
            # cookie-secure is off because TLS terminates at the ingress/ELB.
            - --cookie-secure=false
            - --provider=github
            - --upstream=http://kubernetes-dashboard.kube-system.svc.cluster.local
            - --http-address=0.0.0.0:8080
            - --redirect-url=https://k8s.i.example.com/oauth2/callback
            - --email-domain=*
            - --github-org=YOUR-ORG
            - --pass-basic-auth=false
            - --pass-access-token=false
          env:
            # OAuth credentials come from the dashboard-proxy-secret Secret,
            # which must be created out of band (not committed to VCS).
            - name: OAUTH2_PROXY_COOKIE_SECRET
              valueFrom:
                secretKeyRef:
                  key: cookie
                  name: dashboard-proxy-secret
            - name: OAUTH2_PROXY_CLIENT_ID
              valueFrom:
                secretKeyRef:
                  key: client-id
                  name: dashboard-proxy-secret
            - name: OAUTH2_PROXY_CLIENT_SECRET
              valueFrom:
                secretKeyRef:
                  key: client-secret
                  name: dashboard-proxy-secret
          image: a5huynh/oauth2_proxy:2.2
          name: oauth-proxy
          ports:
            - containerPort: 8080
              protocol: TCP
---
# WARNING: grants the dashboard ServiceAccount full cluster-admin rights.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system
---
# Ingress routed by Contour: TLS via the cert-manager-issued secret,
# all paths forwarded to the oauth2_proxy service.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: dashboard-proxy
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: contour
spec:
  rules:
    - host: k8s.i.example.com
      http:
        paths:
          - backend:
              serviceName: dashboard-proxy
              servicePort: 8080
            path: /
  tls:
    - hosts:
        - k8s.i.example.com
      secretName: dashboard-proxy-tls
---
# ClusterIP service exposing oauth2_proxy (port 80 -> container 8080)
# as the backend of the Ingress above.
apiVersion: v1
kind: Service
metadata:
  labels:
    run: dashboard-proxy
  name: dashboard-proxy
  namespace: kube-system
spec:
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 8080
  selector:
    app: dashboard-proxy
  type: ClusterIP
---
# NG: taken from https://github.com/kubernetes/helm/blob/master/docs/rbac.md#example-service-account-with-cluster-admin-role
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
# WARNING: grants the tiller ServiceAccount full cluster-admin rights.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: tiller
    namespace: kube-system
# NG : https://github.com/kubernetes/helm/blob/master/docs/rbac.md#example-deploy-tiller-in-a-namespace-restricted-to-deploying-resources-only-in-that-namespace
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: tiller-manager
namespace: tiller-world
rules:
- apiGroups: ["", "extensions", "apps"]
resources: ["*"]
verbs: ["*"]
---
# NG: https://github.com/kubernetes/helm/blob/master/docs/rbac.md#example-deploy-tiller-in-a-namespace-restricted-to-deploying-resources-only-in-that-namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tiller-binding
  namespace: tiller-world
subjects:
  - kind: ServiceAccount
    name: tiller
    namespace: tiller-world
roleRef:
  kind: Role
  name: tiller-manager
  apiGroup: rbac.authorization.k8s.io
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment