#!/bin/bash
# With the Kubeflow Istio setup, fsGroup is modified by Istio to 1337, which
# breaks the notebook's access to its PVCs.
NAMESPACE="$1"
NOTEBOOK="$2"
# These are the default user and group IDs of the Kubeflow notebook container
JOVYAN_USER="1000"
USERS_GROUP="100"
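# (The gist preview stops at the variable setup. A minimal, hedged sketch of one
# way to finish the job: chown the mounted volumes back to jovyan:users from
# inside the notebook pod. The notebook-name label, the container name and the
# /home/jovyan mount path are assumptions, not taken from the original.)
POD=$(kubectl -n "$NAMESPACE" get pod -l notebook-name="$NOTEBOOK" \
  -o jsonpath='{.items[0].metadata.name}')
kubectl -n "$NAMESPACE" exec "$POD" -c "$NOTEBOOK" -- \
  chown -R "$JOVYAN_USER:$USERS_GROUP" /home/jovyan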
NAMESPACE="hpecp-bootstrap"
PVC_NAME="hpecp-bootstrap-harbor"
# Fetch the PV name bound to the PVC
PV_NAME=$(kubectl -n $NAMESPACE get pvc/$PVC_NAME -o jsonpath='{.spec.volumeName}')
# Fetch the CSI volume handle from the PV
VOL_NAME=$(kubectl -n $NAMESPACE get pv/$PV_NAME -o jsonpath='{.spec.csi.volumeHandle}')
# From the HCP controller, expand the volume
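# (The preview ends before the controller-side expansion. As a generic,
# swapped-in alternative, the PVC can be grown from kubectl instead, provided
# the CSI driver supports volume expansion; NEW_SIZE_GB is an assumed parameter.)
NEW_SIZE_GB="100"
kubectl -n $NAMESPACE patch pvc/$PVC_NAME --type merge -p \
  "{\"spec\":{\"resources\":{\"requests\":{\"storage\":\"${NEW_SIZE_GB}Gi\"}}}}"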
ClientCertFromKubeconfig = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJZkY0OFZJcTRFZmN3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TURBek1EVXhPREl6TVRGYUZ3MHlNVEF6TURVeE9ESXpNVFJhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXVJL1c1eTA0NEZPUWlwSlEKclovSjBINW50VmhwbkQ5cUZNaWg0L3kyQVdKMDNBd3NmbU1HbE81d3RnZUNwSGRFZFdzQ05yWDE2N3Y3RUtWVApxUU1MWFBmN1Njd0Z4R1VNUVdaZUZHMTJjc2wzMmxkbEdMc1FtM24vSk5EM1NlZVU5N0Q2cHhMTHRQK2J3UE1wClZkRm8zZG1ER1MwUEtEd3RWY0prdVZNbU1jMFRaY3ovWmN0YUVySVpxUTZqZHdTZWM2NzQ1VDJiOG1nS0JkZ0kKRjhnS2pFQnVCNEFRbzd3OXRuQjhnWjczZUdZb203Q215MGJaYzRRQnBqUWk2V1YzL2tDeExHbW5hSVZDOFRoOQo3VldsTTFZaDB1WDdUbTlrVndwQnZOYVd6UXllTlU5K0RnZmVsRHUzTGdEY2xQM3NZYTB3WWNHci9xUXVDRjVmCkN1QkxoUUlEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFDZVBNV0toUERZeTlJQ3ByY2ErblRITFU3MjY5Y0MwNWc3agp
# Launch the container from bd_mgmt and invoke the following through a bdconfig command; it takes two parameters: NEW_SIZE and INSTANCE_NAME
# docker run -d --name test bluedata/centos6
INSTANCE_NAME="test"
NEW_SIZE_GB="30"
NEW_SIZE=$(( $NEW_SIZE_GB*1024*1024*1024 ))
CONT_ID=$(docker inspect -f {{.Id}} $INSTANCE_NAME)
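# (The preview ends before the resize itself. A hedged sketch assuming the old
# devicemapper graph driver, where the container's rw device shows up under
# /dev/mapper with the container id in its name; sectors are 512 bytes.)
DEV_NAME=$(ls /dev/mapper | grep "$CONT_ID")
NEW_SECTORS=$(( NEW_SIZE / 512 ))
# Rewrite field 2 (length in sectors) of the device-mapper table, then reload it
TABLE=$(dmsetup table "$DEV_NAME" | awk -v s="$NEW_SECTORS" '{ $2 = s; print }')
dmsetup suspend "$DEV_NAME"
dmsetup reload "$DEV_NAME" --table "$TABLE"
dmsetup resume "$DEV_NAME"
# Grow the filesystem to fill the enlarged device
resize2fs "/dev/mapper/$DEV_NAME"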
# Become super user
sudo su
# Install haproxy
yum install -y haproxy
# Generate a self-signed certificate
mkdir -p /etc/ssl/certs/haproxy
openssl genrsa -out /etc/ssl/certs/haproxy/server.key 1024
openssl req -new -x509 -days 365 \
    -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=localhost" \
    -key /etc/ssl/certs/haproxy/server.key \
    -out /etc/ssl/certs/haproxy/server.crt   # output path assumed; the preview ends at -subj
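# (A hedged sketch of wiring the new cert into haproxy; the frontend/backend
# below are made-up placeholders, not taken from the original gist.)
cat /etc/ssl/certs/haproxy/server.crt /etc/ssl/certs/haproxy/server.key \
    > /etc/ssl/certs/haproxy/server.pem
cat >> /etc/haproxy/haproxy.cfg << EOF
frontend https-in
    bind *:443 ssl crt /etc/ssl/certs/haproxy/server.pem
    default_backend app
backend app
    server app1 127.0.0.1:8080 check
EOF
service haproxy restart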
# Launch dashboard
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
# Give the dashboard full admin privileges
cat >/tmp/dashboard-admin.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
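# (The preview cuts off here; a hedged completion of the binding follows. The
# label value, the cluster-admin roleRef and the kube-system/kubernetes-dashboard
# service account are assumptions, not taken from the original gist.)
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
EOF
kubectl apply -f /tmp/dashboard-admin.yaml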
# Create a PV and PVC for each of the two directories that EPIC requires: /opt/bluedata/db and /opt/bluedata/catalog/bundles
# Create the two PVs from bd-nas3
NFS_SERVER="10.2.12.27"
NFS_SHARE="/jungle/kubernetes/swami/bd-epic/opt/catalog"
cat >/tmp/bd-epic-catalog.yaml << EOF
kind: PersistentVolume
apiVersion: v1
metadata:
  name: bd-epic-catalog
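# (The preview ends here; a hedged sketch of the rest of the PV spec follows.
# The 50Gi size, access mode and reclaim policy are assumptions; only the NFS
# server and share come from the variables above.)
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: ${NFS_SERVER}
    path: ${NFS_SHARE}
EOF
kubectl apply -f /tmp/bd-epic-catalog.yaml
# A matching PVC, and a second PV/PVC pair for /opt/bluedata/db, would follow the same pattern.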
# Set up a keypair to access all your nodes from your Mac
# Define the master node
MASTER_NODE="10.32.1.58"
# Define all minions
MINION_NODES="10.32.1.70 10.32.1.189"
# Define the pod network
POD_NETWORK_RANGE="192.168.0.0/16"
ALL_NODES="$MASTER_NODE $MINION_NODES"
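# (The preview stops at the variable setup. A hedged sketch of the keypair step
# described above; the key path and the root user are assumptions.)
ssh-keygen -t rsa -b 2048 -N "" -f ~/.ssh/id_rsa_k8s
for NODE in $ALL_NODES; do
  ssh-copy-id -i ~/.ssh/id_rsa_k8s.pub root@$NODE
done
# POD_NETWORK_RANGE would later be passed to kubeadm init --pod-network-cidr on
# the master (assumed usage, not shown in the preview).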
# Set the container name here
INSTANCE_NAME="bluedata-400"
# Extract the device number from the instance name
INSTANCE_DEV_ID=$(echo $INSTANCE_NAME | cut -d"-" -f2)
# device name to use for the snapshot
SNAP_NAME=$INSTANCE_NAME-snap
# To create a snapshot device, we need to specify a unique device id. This has to be a 24-bit number
# Going to start from 16484 as the base
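# (The preview ends here. A hedged sketch of the snapshot creation against the
# docker dm-thin pool; the pool lookup mirrors the next gist, and reading the
# container's thin device id and size from the devicemapper metadata files is an
# assumption about the old devicemapper graph driver layout.)
SNAP_DEV_ID=$(( 16484 + INSTANCE_DEV_ID ))
POOL="/dev/mapper/$(docker info | grep pool | awk '{print $3}')"
CONT_ID=$(docker inspect -f {{.Id}} $INSTANCE_NAME)
META="/var/lib/docker/devicemapper/metadata/$CONT_ID"
ORIGIN_DEV_ID=$(python -c "import json,sys; print(json.load(open(sys.argv[1]))['device_id'])" $META)
ORIGIN_SECTORS=$(python -c "import json,sys; print(json.load(open(sys.argv[1]))['size'] // 512)" $META)
# Register the snapshot with the thin pool, then activate it as $SNAP_NAME
# (suspend the origin device first if the container is running)
dmsetup message "$POOL" 0 "create_snap $SNAP_DEV_ID $ORIGIN_DEV_ID"
dmsetup create "$SNAP_NAME" --table "0 $ORIGIN_SECTORS thin $POOL $SNAP_DEV_ID"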
# Set the container name here
INSTANCE_NAME="bluedata-12"
# Get the actual device mapper pool used by docker
POOL=$(docker info | grep pool | awk '{print $3}')
DEVMAPPER_POOL="/dev/mapper/$POOL"
DEV="$INSTANCE_NAME-dev"
# Get the container id of the instance
CONT_ID=$(docker inspect -f {{.Id}} $INSTANCE_NAME)
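# (The preview ends here. A hedged sketch of what these variables are typically
# used for: activating the container's thin device under the name $DEV and
# mounting it for inspection. The metadata path and its JSON fields are
# assumptions about the old devicemapper graph driver.)
META="/var/lib/docker/devicemapper/metadata/$CONT_ID"
DEV_ID=$(python -c "import json,sys; print(json.load(open(sys.argv[1]))['device_id'])" $META)
SECTORS=$(python -c "import json,sys; print(json.load(open(sys.argv[1]))['size'] // 512)" $META)
dmsetup create "$DEV" --table "0 $SECTORS thin $DEVMAPPER_POOL $DEV_ID"
mkdir -p /mnt/$INSTANCE_NAME && mount "/dev/mapper/$DEV" /mnt/$INSTANCE_NAME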