Skip to content

Instantly share code, notes, and snippets.

@jpkrohling
Last active June 9, 2017 14:51
Show Gist options
  • Save jpkrohling/ef5999334dc488a838fb45ae459174e9 to your computer and use it in GitHub Desktop.
Save jpkrohling/ef5999334dc488a838fb45ae459174e9 to your computer and use it in GitHub Desktop.
Provision CentOS and install Origin with Logging and Metrics via Ansible
###### This is not a shell script that you just execute and everything works.
###### There are two sections: one to run on your host, and one to run on the VM.
###### On the host, there's a manual step (recording the IP the VM got).
###### On the VM, you can just copy/paste, perhaps pausing before the Ansible command itself.
## provision the VM (run on the HOST)
IMAGES_DIR="/mnt/storage/jpkroehling/Tools/images"
LOGGING_HOSTNAME="logging.dev.kroehling.de"
cd "${IMAGES_DIR}" || exit 1
# Root password comes from a file so it doesn't show up in `ps` output.
echo "thepassword123" > /tmp/rootpw
virt-builder centos-7.3 -o logging.qcow2 --size 20G --format qcow2 --hostname logging --root-password file:/tmp/rootpw
# NOTE(review): `--memory 8096` looks like a typo for 8192 (8 GiB) -- confirm before changing.
sudo virt-install --import --os-variant=centos7.0 --memory 8096 --vcpus 4 --name logging --disk logging.qcow2 --noautoconsole
echo "Log into the machine via 'sudo virsh console logging', get the IP, add it to the /etc/hosts for the hostname ${LOGGING_HOSTNAME}"
# Manual step: replace this with the IP the VM actually got.
LOGGING_VM_IP="192.168.124.55"
HOSTS_ENTRY="${LOGGING_VM_IP} ${LOGGING_HOSTNAME} openshift.${LOGGING_HOSTNAME} kibana.${LOGGING_HOSTNAME} mux.${LOGGING_HOSTNAME}"
# Quote the entry and append via `tee -a`: the original `sudo su -c "echo $HOSTS_ENTRY ..."`
# expanded the variable unquoted inside a re-parsed command string.
echo "${HOSTS_ENTRY}" | sudo tee -a /etc/hosts > /dev/null
ssh-copy-id "root@${LOGGING_HOSTNAME}"
ssh "root@${LOGGING_HOSTNAME}"
### on the VM
LOGGING_HOSTNAME="logging.dev.kroehling.de"
LOGGING_VM_IP="192.168.124.55"
HOSTS_ENTRY="${LOGGING_VM_IP} ${LOGGING_HOSTNAME} openshift.${LOGGING_HOSTNAME} kibana.${LOGGING_HOSTNAME} mux.${LOGGING_HOSTNAME}"
# Open the ports needed by SSH (22), the routers (80/443), the OpenShift API
# (8443) and secure-forward (24284), both for the running config and permanently.
for port in 22 80 443 8443 24284; do
  firewall-cmd --add-port="${port}/tcp"
  firewall-cmd --add-port="${port}/tcp" --permanent
done
# -f: fail on HTTP errors instead of saving an error page as the repo file.
curl -f https://raw.githubusercontent.com/ViaQ/Main/master/centos7-viaq.repo > /etc/yum.repos.d/centos7-viaq.repo
# we just want the dependencies, so, install (with dependencies) and remove it
yum install screen openshift-ansible git python2-passlib -y
yum remove openshift-ansible -y
mkdir -p /opt/ansible
cd /opt/ansible || exit 1
git clone https://github.com/jpkrohling/openshift-ansible.git . -b JPK-HawkularAlertsWithLogging
curl -f https://raw.githubusercontent.com/ViaQ/Main/master/vars.yaml.template > vars.yaml
echo "${LOGGING_HOSTNAME}" > /etc/hostname
# Quoted append via `tee -a` instead of re-parsing an unquoted string with `sudo su -c`.
echo "${HOSTS_ENTRY}" | sudo tee -a /etc/hosts > /dev/null
# Generate the Ansible inventory. The here-doc delimiter is unquoted (EOF, not
# 'EOF'), so ${LOGGING_VM_IP} below is expanded by the shell now; everything
# else lands in the file literally.
# Single-node setup: localhost is both master and node, labeled region=infra
# so the registry/router/logging infrastructure pods can be scheduled on it.
cat > /opt/ansible/ansible-inventory <<EOF
[OSEv3:children]
nodes
masters
[OSEv3:vars]
short_version=1.5
ansible_connection=local
openshift_release=v1.5
openshift_deployment_type=origin
openshift_master_identity_providers=[{'challenge': 'true', 'login': 'true', 'kind': 'AllowAllPasswordIdentityProvider', 'name': 'allow_all'}]
openshift_disable_check=disk_availability,memory_availability
openshift_hosted_logging_deploy=true
openshift_logging_install_logging=true
openshift_logging_image_prefix=docker.io/openshift/origin-
openshift_logging_image_version=v1.5.1
openshift_logging_namespace=logging
openshift_logging_es_cluster_size=1
openshift_hosted_metrics_deploy=true
openshift_hosted_metrics_public_url=hawkular-metrics.app.${LOGGING_VM_IP}.nip.io
openshift_metrics_hawkular_hostname=hawkular-metrics.app.${LOGGING_VM_IP}.nip.io
openshift_metrics_install_metrics=true
openshift_metrics_image_prefix=jpkroehling/origin-
openshift_metrics_image_version=dev
openshift_metrics_cassandra_replicas=1
[nodes]
localhost storage=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
[masters]
localhost storage=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
EOF
# Run the BYO config playbook with the ViaQ vars and our inventory,
# keeping a full verbose log in /tmp/ansible.log.
ANSIBLE_LOG_PATH=/tmp/ansible.log ansible-playbook \
  -vvv \
  -e @/opt/ansible/vars.yaml \
  -i /opt/ansible/ansible-inventory \
  /opt/ansible/playbooks/byo/config.yml
## remove the readiness probe from es and kibana
for component in es kibana; do
  # Note: `oc get -l` exits 0 even when no resource matches, so also treat an
  # empty result as a failure (the original only checked the exit status, and
  # then ran `oc get ""` on the next step). The duplicate `-n logging` is gone.
  if ! dc=$(oc get dc -n logging -l "component=${component}" -o name 2> /dev/null) || [ -z "${dc}" ]; then
    echo "Could not get the DC for component ${component}"
    continue
  fi
  # Strip .spec.template.spec.containers[0].readinessProbe from the DC JSON
  # (python2 `print` statement; the pipeline's status is python's).
  if ! new_json=$(oc get "${dc}" -o json -n logging 2> /dev/null | python -c 'import json, sys; hsh = json.loads(sys.stdin.read()); del hsh["spec"]["template"]["spec"]["containers"][0]["readinessProbe"]; print json.dumps(hsh)' 2> /dev/null); then
    echo "Could not remove the readiness probe from the component ${component}"
    continue
  fi
  # Recreate the DC without the probe; printf avoids echo mangling the JSON.
  oc delete -n logging "${dc}"
  printf '%s\n' "${new_json}" | oc create -n logging -f -
done
## add the hawkular user to the fluentd role
# Pod name: `-o name` yields "pods/<name>", so awk strips the "pods/" prefix.
espod=$(oc get pods -n logging -l component=es -o name | awk -F\/ '{print $2}')
# One pipeline: fetch the current SearchGuard roles-mapping document from ES
# (authenticating with the admin client cert/key mounted in the pod), append
# "hawkular" to sg_role_fluentd's users via python2, then PUT the document
# back and pretty-print the response.
# Quoting note on the python -c line: "'hawkular'" closes and reopens the
# surrounding shell single quotes, so Python actually sees append("hawkular").
oc exec -n logging $espod -- curl -s -k \
--cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key \
https://localhost:9200/.searchguard.$espod/rolesmapping/0 | \
python -c 'import json, sys; hsh = json.loads(sys.stdin.read())["_source"]; hsh["sg_role_fluentd"]["users"].append("'hawkular'"); print json.dumps(hsh)' | \
oc exec -n logging -i $espod -- curl -s -k --cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key \
https://localhost:9200/.searchguard.$espod/rolesmapping/0 -XPUT -d@- | python -mjson.tool
## sudo make me a sandwich
# Grant cluster-admin to the `developer` user (dev convenience only -- do not
# do this on a shared or production cluster).
oc adm policy add-cluster-role-to-user cluster-admin developer
### at this point, login with the `developer` user (password: developer) into the UI
### and look at the Hawkular Metrics logs: you should see that it failed to get the cert
### from logging, because logging was probably installed after metrics
### just scale down the metrics pod, and scale it up, and it should work
### test the connection to ES from the hawkular-metrics pod
### the following command should return a JSON like this:
# {
# "name" : "Harry Leland",
# "cluster_name" : "logging-es",
# "cluster_uuid" : "SkBgk-yZSTisQ7ScfGZhyg",
# "version" : {
# "number" : "2.4.4",
# "build_hash" : "fcbb46dfd45562a9cf00c604b30849a6dec6b017",
# "build_timestamp" : "2017-01-03T11:33:16Z",
# "build_snapshot" : false,
# "lucene_version" : "5.5.2"
# },
# "tagline" : "You Know, for Search"
# }
metrics_pod=$(oc get pods -l metrics-infra=hawkular-metrics -n openshift-infra -o name | awk -F\/ '{print $2}')
# `--` separates oc's own flags from the command run inside the pod
# (the original bare `oc exec pod cat ...` form is deprecated/ambiguous).
token=$(oc exec -n openshift-infra "${metrics_pod}" -- cat /var/run/secrets/kubernetes.io/serviceaccount/token)
username=hawkular
# NOTE(review): X-Proxy-Remote-User appears to select the ES user while the
# bearer token authenticates the request -- confirm against the ES proxy config.
curl \
	-s \
	-k \
	-H "X-Forwarded-For: 127.0.0.1" \
	-H "X-Proxy-Remote-User: $username" \
	-H "Authorization: Bearer $token" \
	https://logging-es.logging.svc.cluster.local:9200
### this is a sample call for a log being added to Elasticsearch, upon which Hawkular Alerts might react to:
metrics_pod=$(oc get pods -l metrics-infra=hawkular-metrics -n openshift-infra -o name | awk -F\/ '{print $2}')
# `--` separates oc's flags from the in-pod command; pod name quoted.
token=$(oc exec -n openshift-infra "${metrics_pod}" -- cat /var/run/secrets/kubernetes.io/serviceaccount/token)
username=hawkular
# POST one synthetic log record into the "log" index, type "org.hawkular.logging".
curl \
	-k \
	-v \
	-H "X-Forwarded-For: 127.0.0.1" \
	-H "X-Proxy-Remote-User: $username" \
	-H "Authorization: Bearer $token" \
	-XPOST \
	https://logging-es.logging.svc.cluster.local:9200/log/org.hawkular.logging \
	--data '{"@timestamp":"2017-05-31T15:30:32.051646+0000", "level":"ERROR", "app":"AppA", "message":"Message 27956 from Frontend"}'
# Query the newest entries of the .operations.* indices, sorted by timestamp.
metrics_pod=$(oc get pods -l metrics-infra=hawkular-metrics -n openshift-infra -o name | awk -F\/ '{print $2}')
# `--` separates oc's flags from the in-pod command; pod name quoted.
token=$(oc exec -n openshift-infra "${metrics_pod}" -- cat /var/run/secrets/kubernetes.io/serviceaccount/token)
username=hawkular
# The URL is quoted: `*` and `?` are shell glob characters and must not be
# subject to pathname expansion.
curl \
	-s \
	-k \
	-H "X-Forwarded-For: 127.0.0.1" \
	-H "X-Proxy-Remote-User: $username" \
	-H "Authorization: Bearer $token" \
	"https://logging-es.logging.svc.cluster.local:9200/.operations.*/_search?sort=@timestamp:desc" | python -mjson.tool
### verification steps
oc login -u developer -p developer
HAWKULAR_TOKEN=$(oc whoami -t)
# NOTE(review): LOGGING_VM_IP must still be set from the setup section above.
HAWKULAR_HOST="hawkular-metrics.app.${LOGGING_VM_IP}.nip.io"
HAWKULAR_PORT=443   # was defined but never used; 443 is the HTTPS default anyway
HAWKULAR_TENANT="my_tenant"
# -f: fail on HTTP errors instead of saving an error page as the definitions file.
curl -f https://raw.githubusercontent.com/lucasponce/openshift-hawkular-metrics-scripts/master/trigger-definitions.json > /tmp/trigger-definitions.json
# Import all trigger definitions into Hawkular Alerts...
curl -k -v \
	-H "Hawkular-Tenant: ${HAWKULAR_TENANT}" \
	-H "Authorization: Bearer ${HAWKULAR_TOKEN}" \
	-H "Content-Type: application/json" \
	-d @/tmp/trigger-definitions.json \
	"https://${HAWKULAR_HOST}/hawkular/alerts/import/all" | python -mjson.tool
# ...then list the triggers to confirm the import worked.
curl -k -v \
	-H "Hawkular-Tenant: ${HAWKULAR_TENANT}" \
	-H "Authorization: Bearer ${HAWKULAR_TOKEN}" \
	-H "Content-Type: application/json" \
	"https://${HAWKULAR_HOST}/hawkular/alerts/triggers" | python -mjson.tool
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment