Skip to content

Instantly share code, notes, and snippets.

@dmc5179
Created August 18, 2019 02:08
Show Gist options
  • Save dmc5179/37c5ca6633ca8f14bb21ded153110595 to your computer and use it in GitHub Desktop.
Script to configure and setup OmniSci on OCP with GPU support
#!/bin/bash
#Copyright (C) 2019 Red Hat, Inc.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# Script for configuring an OCP baseline to run the OmniSci Demo
################################################################
#
# For the moment this script assumes a few things:
#
# 1) You have the OCP oc CLI command
# 2) The cluster is running
# 3) The oc CLI command is logged into the cluster as admin
# 4) You have an aws cli installed and configured to talk to the same account that the OCP cluster is running
#
#
################################################################

# Fail fast: abort on command errors, unset variables, and failed pipeline stages.
set -euo pipefail

# SSH Key/PEM to get into the cluster
SSH_PRIVATE_KEY_FILE=

# Staging Directory — must be filled in before running; abort with a clear
# message instead of letting 'pushd ""' fail confusingly further down.
STAGING=
: "${STAGING:?STAGING must be set to an existing staging directory}"
pushd "${STAGING}"
# Update the cluster to support GPU nodes
if [ -d "openshift-psap" ]; then
  echo "Openshift PSAP dir exists, skipping"
else
  # Bail out if the clone fails; the pushd below would otherwise fail confusingly.
  git clone https://github.com/dmc5179/openshift-psap.git || exit 1
fi
pushd openshift-psap || exit 1
echo "Current Dir: $PWD"

# For now this is the elastic IP of the master and gpu node
# unfortunately this changes on each boot for the GPU node
MASTER_NODE="ec2-3-226-57-255.compute-1.amazonaws.com"
GPU_NODE="ec2-54-211-14-167.compute-1.amazonaws.com"

# Pre-trust the node host keys so the ssh/ansible steps below run non-interactively.
ssh-keyscan "${MASTER_NODE}" >> ~/.ssh/known_hosts
ssh-keyscan "${GPU_NODE}" >> ~/.ssh/known_hosts

# Blacklist nouveau, install the NVIDIA driver, then the container runtime hook.
# Each playbook depends on the previous one succeeding, so stop on failure.
ansible-playbook -i ./inventory/inventory -e hosts_to_apply="fast_nodes" ./playbooks/nouveau-blacklist.yaml || exit 1
ansible-playbook -i ./inventory/inventory -e hosts_to_apply="fast_nodes" ./playbooks/nvidia-driver-install.yaml || exit 1
ansible-playbook -i ./inventory/inventory -e hosts_to_apply="fast_nodes" ./playbooks/nvidia-container-runtime-hook.yaml || exit 1

# Label the node so GPU workloads can be scheduled onto it.
oc label node "${GPU_NODE}" "openshift.com/gpu-accelerator=true"
popd
# TODO: Create an EBS Volume
# TODO: Attach the EBS Volume to the node which will support a GPU
# TODO: Format the EBS Volume

# Relabel the local storage path so containers are allowed to use it.
# Force a pseudo terminal (-t) so sudo can run on the remote side.
# NOTE(review): 'oc rsh' normally targets a pod, not a node hostname —
# confirm this works, or switch to 'oc debug node/...' or plain ssh.
oc rsh -t "${GPU_NODE}" sudo chcon -R unconfined_u:object_r:svirt_sandbox_file_t:s0 /mnt/local-storage/

if [ -d "openshift" ]; then
  echo "Openshift dir exists, skipping"
else
  # Bail out if the clone fails; the pushd below would otherwise fail confusingly.
  git clone https://github.com/dmc5179/openshift.git || exit 1
fi

pushd "openshift/storage_class/local" || exit 1
# Update the cluster to support local storage class
bash create_local.sh
# popd returns to the directory the matching pushd was issued from
# (the staging dir), not merely one level up.
popd
# Deploy OmniSci into its own project/namespace.
pushd "openshift/omnisci/single_node" || exit 1

# Create the project, or just switch to it if it already exists so that
# re-running this script does not abort here.
oc new-project "omnisci" || oc project "omnisci"

# Allow the default service account to run containers with arbitrary UIDs.
oc adm policy add-scc-to-user anyuid -z default

# Show the local PVs so the operator can confirm the expected PV exists.
# TODO: Create the PV attached to the local storage
oc get pv | grep "local"
# TODO: Create the PVC attached to the PV
# TODO: Deploy the OmniSci application
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment