When something goes wrong, I want to gracefully degrade the user experience (instead of returning a 500 Server Error), and I want to be notified about the failure.
Wrap non-mission-critical code:
provider "google" { | |
project = var.google_project_id | |
region = var.region | |
zone = var.az | |
} | |
resource "google_compute_instance" "k3s_master_instance" { | |
name = "k3s-master" | |
machine_type = "n1-standard-1" | |
tags = ["k3s", "k3s-master", "http-server", "https-server"] |
on run {input, parameters} -- remove this line if running in Script Editor and not with Automator | |
tell application "OBS" to activate | |
tell application "System Events" | |
tell process "OBS" | |
set frontmost to true | |
if name of every menu item of menu "Tools" of menu bar 1 contains "Start Virtual Camera" then | |
click menu item "Start Virtual Camera" of menu "Tools" of menu bar 1 | |
end if | |
click (first button of window 1 whose role description is "minimize button") | |
end tell |
#!/bin/bash | |
export RANCHER_DOMAIN="rancher.yourdomain.com" | |
export RANCHER_TOKEN="token-xxxxx:xxxx" | |
export ACCESS_MODE="unrestricted" | |
export CONNECTION_TIMEOUT="5000" | |
export LDAP_HOST="ldap.yourdomain.com" | |
export LDAP_PORT="636" | |
export TLS="true" | |
export SA_DN="uid=x,ou=x,o=x,dc=yourdomain,dc=com" | |
export SA_PW="sa_password" |
This is not official documentation or tooling; use it with caution.
This generates the Kubernetes definitions of the cattle-cluster-agent
Deployment and cattle-node-agent
DaemonSet, in case they were accidentally removed, the server-url was changed, or certificates were changed. It is supposed to run on every cluster Rancher manages. If you have custom clusters created in Rancher, see "Kubeconfig for Custom clusters created in Rancher" for
how to obtain the kubeconfig to directly talk to the Kubernetes API (as usually it doesn't work via Rancher anymore). For other clusters, use the tools provided by the provider to get the kubeconfig.
IMPORTANT: You get the cluster/node agents definitions from Rancher, and you apply them to the cluster that is created/managed so you need to switch kubeconfig to point to that cluster before applying them.
#!/usr/bin/env bash | |
cmd=$1 | |
chart=$2 | |
env=$3 | |
dir=${chart}-kustomize | |
chart=${chart/.\//} | |
build() { |
variable "hcloud_token" { | |
} | |
provider "hcloud" { | |
token = "${var.hcloud_token}" | |
} | |
resource "hcloud_server" "kube-master" { | |
name = "kube-master" | |
image = "ubuntu-18.04" |
--- | |
kind: ClusterRole | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
metadata: | |
name: traefik-ingress-controller | |
rules: | |
- apiGroups: | |
- "" | |
resources: | |
- services |
#!/usr/bin/env bash | |
set -e | |
set -u | |
set -o pipefail | |
show_help() { | |
cat << EOF | |
Usage: $(basename "$0") <options> | |
-h, --help Display help |
#!/bin/bash | |
# Usage: ./get_kubeconfig_custom_cluster_rancher2.sh cluster_name | |
# Needs to be run on the server running `rancher/rancher` container | |
# Check if jq exists | |
command -v jq >/dev/null 2>&1 || { echo "jq is not installed. Exiting." >&2; exit 1; } | |
# Check if clustername is given | |
if [ -z "$1" ]; then | |
echo "Usage: $0 [clustername]" |