Deploy rabbitmq-autocluster on k8s with persistent storage
#!/bin/bash
set -eo pipefail

export KUBE_NAMESPACE=test
export REPLICA_COUNT=3

# StatefulSet: one RabbitMQ node per replica, each with its own persistent volume
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: rabbitmq
  namespace: $KUBE_NAMESPACE
spec:
  serviceName: rabbitmq
  replicas: $REPLICA_COUNT
  template:
    metadata:
      labels:
        app: rabbitmq
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: rabbitmq-autocluster
        image: pivotalrabbitmq/rabbitmq-autocluster
        ports:
        - name: http
          protocol: TCP
          containerPort: 15672
        - name: amqp
          protocol: TCP
          containerPort: 5672
        livenessProbe:
          exec:
            command: ["rabbitmqctl", "status"]
          initialDelaySeconds: 30
          timeoutSeconds: 5
        readinessProbe:
          exec:
            command: ["rabbitmqctl", "status"]
          initialDelaySeconds: 10
          timeoutSeconds: 5
        imagePullPolicy: Always
        env:
        - name: MY_POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: HOSTNAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: RABBITMQ_USE_LONGNAME
          value: "true"
        - name: RABBITMQ_NODENAME
          value: "rabbit@\$(HOSTNAME).rabbitmq.\$(NAMESPACE).svc.cluster.local"
        - name: AUTOCLUSTER_TYPE
          value: "k8s"
        - name: AUTOCLUSTER_DELAY
          value: "10"
        - name: K8S_ADDRESS_TYPE
          value: "hostname"
        - name: K8S_SERVICE_NAME
          value: rabbitmq
        - name: K8S_HOSTNAME_SUFFIX
          value: ".rabbitmq.\$(NAMESPACE).svc.cluster.local"
        - name: AUTOCLUSTER_CLEANUP
          value: "false"
        - name: CLEANUP_WARN_ONLY
          value: "true"
        volumeMounts:
        - name: rabbitmq-data
          mountPath: /var/lib/rabbitmq/mnesia
  volumeClaimTemplates:
  - metadata:
      name: rabbitmq-data
      annotations:
        volume.beta.kubernetes.io/storage-class: gp2
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 4Gi
EOF
# Headless service for Pod DNS
cat <<EOF | kubectl apply -f -
kind: Service
apiVersion: v1
metadata:
  namespace: $KUBE_NAMESPACE
  name: rabbitmq
  labels:
    app: rabbitmq
spec:
  clusterIP: None
  ports:
  - name: http
    protocol: TCP
    port: 15672
    targetPort: 15672
  - name: amqp
    protocol: TCP
    port: 5672
    targetPort: 5672
  selector:
    app: rabbitmq
EOF
# LoadBalancer service for public access
cat <<EOF | kubectl apply -f -
kind: Service
apiVersion: v1
metadata:
  namespace: $KUBE_NAMESPACE
  name: rabbitmq-lb
  labels:
    app: rabbitmq
    type: LoadBalancer
spec:
  type: LoadBalancer
  ports:
  - name: http
    protocol: TCP
    port: 15672
    targetPort: 15672
  - name: amqp
    protocol: TCP
    port: 5672
    targetPort: 5672
  selector:
    app: rabbitmq
EOF
echo "Waiting for StatefulSet to complete rolled out" | |
# kubectl rollout status statefulset/rabbitmq -n $KUBE_NAMESPACE # Not supported in k8s v1.6 and prior | |
for i in $(seq 1 120); do | |
if kubectl exec rabbitmq-$(($REPLICA_COUNT - 1)) -n $KUBE_NAMESPACE -- rabbitmqctl status &> /dev/null; then | |
break | |
fi | |
sleep 1s | |
done | |
if [[ "$i" == 120 ]]; then | |
echo "StatefulSet taking too long to complete, you need to manual install..." | |
exit 1 | |
fi | |
echo "Setting up HA policy" | |
kubectl exec rabbitmq-0 -n $KUBE_NAMESPACE -- rabbitmqctl set_policy ha-all "" '{"ha-mode":"all","ha-sync-mode":"automatic"}' |
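Once the script finishes, cluster membership and the external endpoint can be checked by hand. The commands below are a minimal sketch, not part of the original gist; they assume the same KUBE_NAMESPACE and the rabbitmq-lb service created above.

# Confirm that all replicas joined a single cluster
kubectl exec rabbitmq-0 -n $KUBE_NAMESPACE -- rabbitmqctl cluster_status
# Show the external address assigned to the LoadBalancer service (cloud-provider dependent)
kubectl get svc rabbitmq-lb -n $KUBE_NAMESPACE
# Or forward the management UI to http://localhost:15672
kubectl port-forward rabbitmq-0 15672:15672 -n $KUBE_NAMESPACE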
value: "false"
I know this is one way to solve the problem, but it is dangerous. We already tried this approach, and sometimes the cluster partitions, leaving two separate clusters: some clients connect to one part and some to the other.
So it is not my suggestion. If you do not use it, a node may go down, but the cluster can still work.
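If partitions are the main concern, RabbitMQ's own partition handling can be tuned independently of the autocluster cleanup setting. The snippet below is only a sketch, not part of the gist above: it assumes the image reads a mounted /etc/rabbitmq/rabbitmq.config, and the ConfigMap name rabbitmq-config is made up for illustration.

# Pause the minority side of a partition instead of letting two clusters serve traffic
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: rabbitmq-config
  namespace: $KUBE_NAMESPACE
data:
  rabbitmq.config: |
    [{rabbit, [{cluster_partition_handling, pause_minority}]}].
EOF
# Then mount it into the StatefulSet pod spec, roughly:
#   volumes:
#   - name: rabbitmq-config
#     configMap:
#       name: rabbitmq-config
#   volumeMounts:
#   - name: rabbitmq-config
#     mountPath: /etc/rabbitmq/rabbitmq.config
#     subPath: rabbitmq.config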