$> sudo apt update
$> sudo apt upgrade -y
$> sudo apt install -y vim socat
$> sudo snap install microk8s --classic
$> sudo ufw default allow routed
$> sudo iptables -P FORWARD ACCEPT
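
The FORWARD policy set above does not survive a reboot; one option to persist it (assuming the stock iptables-persistent package) is:
$> sudo apt install -y iptables-persistent
$> sudo netfilter-persistent save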

$> sudo microk8s.enable dns dashboard ingress istio registry metrics-server storage
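
A quick sanity check that the addons actually came up before moving on:
$> microk8s.status --wait-ready                  # waits until the cluster is ready and lists the enabled addons
$> microk8s.kubectl get pods --all-namespaces    # addon pods should all reach Running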

$> sudo snap alias microk8s.kubectl  kubectl
$> sudo snap alias microk8s.docker   docker
$> sudo snap alias microk8s.istioctl istioctl
  You may need to configure your firewall to allow pod-to-pod and pod-to-internet communication:
$> sudo ufw allow in on cbr0 && sudo ufw allow out on cbr0
$> sudo ufw default allow routed
$> sudo snap install helm --classic
$> microk8s.kubectl create serviceaccount --namespace kube-system tiller
$> microk8s.kubectl create clusterrolebinding tiller-cluster-rule   \
   --clusterrole=cluster-admin   --serviceaccount=kube-system:tiller
$> helm init --service-account tiller
$> mkdir ~/snap/helm/common/kube
$> microk8s.kubectl config view --raw > ~/snap/helm/common/kube/config
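
With the kubeconfig in place, a quick check that Helm can reach Tiller (tiller-deploy is the default Helm 2 deployment name):
$> microk8s.kubectl -n kube-system rollout status deployment/tiller-deploy   # wait for Tiller to become ready
$> helm version                                                              # should now print Client and Server versions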

Grafana / Kubernetes dashboard (through the API server proxy):

  http://{IP}:8080/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy/
  http://{IP}:8080/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/pod?namespace=default
  http://{IP}:8080/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
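
The {IP}:8080 URLs above assume the API server is proxied on port 8080; one way to do that on a lab VM (binding on all interfaces, so do not expose it publicly) is:
$> microk8s.kubectl proxy --address 0.0.0.0 --port 8080 --accept-hosts='.*' &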
$> kubectl label namespace default istio-injection=enabled
Say yes when the TLS question shows up.
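
To confirm the label took effect:
$> kubectl get namespace -L istio-injection   # the default namespace should show istio-injection=enabled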
$> kubectl create -f https://raw.githubusercontent.com/istio/istio/release-1.0/samples/bookinfo/platform/kube/bookinfo.yaml
$> kubectl create -f https://raw.githubusercontent.com/istio/istio/release-1.0/samples/bookinfo/networking/bookinfo-gateway.yaml
$> kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.0/samples/bookinfo/networking/destination-rule-all-mtls.yaml
$> kubectl -n istio-system get service istio-ingressgateway \
  -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}'
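
Combining that nodePort with the node IP gives the Bookinfo entry point; a quick smoke test ({IP} as in the URLs above):
$> export GATEWAY_URL={IP}:$(kubectl -n istio-system get service istio-ingressgateway \
  -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')
$> curl -s http://${GATEWAY_URL}/productpage | grep -o "<title>.*</title>"   # expect the Bookinfo page title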

# Istio metrics (Grafana) - http://IP:3000

$> kubectl -n istio-system port-forward --address 0.0.0.0 $(kubectl \
  -n istio-system get pod -l app=grafana -o jsonpath='{.items[0].metadata.name}') 3000:3000
# Istio tracing (Jaeger) - http://IP:16686
$> kubectl -n istio-system port-forward --address 0.0.0.0 $(kubectl \
  -n istio-system get pod -l app=jaeger -o jsonpath='{.items[0].metadata.name}') 16686:16686
# Istio service graph - http://IP:8088
$> kubectl -n istio-system port-forward --address 0.0.0.0 $(kubectl \
  -n istio-system get pod -l app=servicegraph \
  -o jsonpath='{.items[0].metadata.name}') 8088:8088
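
Each port-forward blocks its terminal; on a single lab VM they can simply be backgrounded and cleaned up later:
$> kubectl -n istio-system port-forward --address 0.0.0.0 $(kubectl \
  -n istio-system get pod -l app=grafana -o jsonpath='{.items[0].metadata.name}') 3000:3000 &
$> jobs      # list the backgrounded forwards
$> kill %1   # stop the first one when done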

Intelligent routing

Route all traffic to the v1 subsets first. Notice that the reviews part of the page then displays with no rating stars, because reviews:v1 never calls the ratings service:
 $> kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.0/samples/bookinfo/networking/virtual-service-all-v1.yaml
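
To confirm the route rules were picked up (every route should now point at the v1 subset):
 $> kubectl get virtualservices    # one VirtualService per Bookinfo service
 $> kubectl get destinationrules   # the subsets defined earlier by destination-rule-all-mtls.yaml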

Log in as jason (routed to reviews:v2 by the test rule below) or as another user such as caglar (still routed to v1) and compare:

 $> kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.0/samples/bookinfo/networking/virtual-service-reviews-test-v2.yaml
 $> kubectl delete -f https://raw.githubusercontent.com/istio/istio/release-1.0/samples/bookinfo/networking/virtual-service-all-v1.yaml
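
When finished, the Bookinfo resources can be removed by deleting the same manifests that created them:
 $> kubectl delete -f https://raw.githubusercontent.com/istio/istio/release-1.0/samples/bookinfo/networking/bookinfo-gateway.yaml
 $> kubectl delete -f https://raw.githubusercontent.com/istio/istio/release-1.0/samples/bookinfo/platform/kube/bookinfo.yaml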

# Ensure there is enough disk space to install everything
sudo apt-get update
sudo apt-get upgrade
sudo apt-get dist-upgrade
sudo dpkg-reconfigure tzdata
sudo snap remove lxc
sudo snap remove lxd
sudo apt-get remove --purge lxc 
sudo apt-get remove --purge lxd 
sudo apt-get autoremove
# Can throw an error; ensure each purge/uninstall above completed
sudo apt-add-repository ppa:ubuntu-lxc/stable
sudo apt-get update
sudo apt-get upgrade
sudo apt-get dist-upgrade
sudo apt-get install tmux lxc lxd zfsutils-linux 
df -h => 84% Free, 32G
{ SNAPSHOT - beforeLxdInit }
lxd init
    ipv6:none
ifconfig | grep flags
sudo sysctl -w net.ipv6.conf.ens33.disable_ipv6=1  
sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=1  
sudo sysctl -w net.ipv6.conf.lxcbr0.disable_ipv6=1  
sudo sysctl -w net.ipv6.conf.lxdbr0.disable_ipv6=1  
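# These sysctl changes are lost on reboot; to persist them, append the catch-all keys to /etc/sysctl.conf:
echo 'net.ipv6.conf.all.disable_ipv6 = 1'     | sudo tee -a /etc/sysctl.conf
echo 'net.ipv6.conf.default.disable_ipv6 = 1' | sudo tee -a /etc/sysctl.conf
sudo sysctl -p   # reload without rebooting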
time sudo snap install conjure-up --classic
{ SNAPSHOT - beforeConjureUp }
conjure-up => CHOICE = { microk8s }
alias kubectl='microk8s.kubectl'
#------------------------------------
# Not necessary to enable all of these, but this is a test
microk8s.enable storage
microk8s.enable registry    
microk8s.enable dns dashboard ingress istio metrics-server prometheus fluentd jaeger
#------------------------------------
time sudo snap install helm --classic
helm init
kubectl create serviceaccount --namespace kube-system tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
helm search
# Before updating the repo, helm version threw an error:
helm version
    Error: could not find a ready tiller pod 
# Then update the repo:
helm repo update
# After updating the repo it was OK:
helm version
    Client: &version.Version { 
            SemVer:"v2.13.0", 
            GitCommit:"79d07943b03aea2b76c12644b4b54733bc5958d6",
            GitTreeState:"clean"
        }
    Server: &version.Version { 
            SemVer:"v2.13.0", 
            GitCommit:"79d07943b03aea2b76c12644b4b54733bc5958d6", 
            GitTreeState:"clean" 
        }
#------------------------------------
helm install stable/mysql
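# helm picks a random release name (brown-hyena below); the chart's NOTES show how to read back
# the generated MySQL root password from the Secret it creates:
kubectl get secret --namespace default brown-hyena-mysql \
    -o jsonpath="{.data.mysql-root-password}" | base64 --decode; echo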
df -h | grep sda
    /dev/sda2        40G   12G   26G  31% /
{ SNAPSHOT - afterFixErrorBeforeEclipseChe }
#------------------------------------
========================================================================================================================
# Looks like it added a messy OverlayFS
df -h
    Filesystem      Size  Used Avail Use% Mounted on
    udev            1.9G     0  1.9G   0% /dev
    tmpfs           393M  2.5M  390M   1% /run
    /dev/sda2        40G   12G   26G  31% /
    tmpfs           2.0G     0  2.0G   0% /dev/shm
    tmpfs           5.0M     0  5.0M   0% /run/lock
    tmpfs           2.0G     0  2.0G   0% /sys/fs/cgroup
    /dev/loop0       91M   91M     0 100% /snap/core/6350
    tmpfs           393M     0  393M   0% /run/user/1000
    tmpfs           100K     0  100K   0% /var/lib/lxd/shmounts
    tmpfs           100K     0  100K   0% /var/lib/lxd/devlxd
    /dev/loop1      110M  110M     0 100% /snap/conjure-up/1045
    /dev/loop2      205M  205M     0 100% /snap/microk8s/492
    shm              64M     0   64M   0% /var/snap/microk8s/common/run/containerd/io.containerd.grpc.v1.cri/sandboxes$
    overlay          40G   12G   26G  31% /var/snap/microk8s/common/run/containerd/io.containerd.runtime.v1.linux/k8s.$
    overlay          40G   12G   26G  31% /var/snap/microk8s/common/run/containerd/io.containerd.runtime.v1.linux/k8s.$
    shm              64M     0   64M   0% /var/snap/microk8s/common/run/containerd/io.containerd.grpc.v1.cri/sandboxes$
    overlay          40G   12G   26G  31% /var/snap/microk8s/common/run/containerd/io.containerd.runtime.v1.linux/k8s.$
    shm              64M     0   64M   0% /var/snap/microk8s/common/run/containerd/io.containerd.grpc.v1.cri/sandboxes$
    overlay          40G   12G   26G  31% /var/snap/microk8s/common/run/containerd/io.containerd.runtime.v1.linux/k8s.$
    shm              64M     0   64M   0% /var/snap/microk8s/common/run/containerd/io.containerd.grpc.v1.cri/sandboxes$
    overlay          40G   12G   26G  31% /var/snap/microk8s/common/run/containerd/io.containerd.runtime.v1.linux/k8s.$
    shm              64M     0   64M   0% /var/snap/microk8s/common/run/containerd/io.containerd.grpc.v1.cri/sandboxes$
    overlay          40G   12G   26G  31% /var/snap/microk8s/common/run/containerd/io.containerd.runtime.v1.linux/k8s.$
    shm              64M     0   64M   0% /var/snap/microk8s/common/run/containerd/io.containerd.grpc.v1.cri/sandboxes$
    overlay          40G   12G   26G  31% /var/snap/microk8s/common/run/containerd/io.containerd.runtime.v1.linux/k8s.$
    overlay          40G   12G   26G  31% /var/snap/microk8s/common/run/containerd/io.containerd.runtime.v1.linux/k8s.$
    shm              64M  4.7M   60M   8% /var/snap/microk8s/common/run/containerd/io.containerd.grpc.v1.cri/sandboxes$
    overlay          40G   12G   26G  31% /var/snap/microk8s/common/run/containerd/io.containerd.runtime.v1.linux/k8s.$
    shm              64M  4.7M   60M   8% /var/snap/microk8s/common/run/containerd/io.containerd.grpc.v1.cri/sandboxes$
    overlay          40G   12G   26G  31% /var/snap/microk8s/common/run/containerd/io.containerd.runtime.v1.linux/k8s.$
========================================================================================================================

kubectl run eclipseche --image=eclipse/che-server:nightly
    deployment.apps/eclipseche2 created
    ------------------------------------
    # Couldn't find a way to follow the advice below; couldn't find the equivalent syntax
    # (see the sketch after this block):
    kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version.
    Use kubectl run --generator=run-pod/v1 or kubectl create instead.
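
# A non-deprecated equivalent of the kubectl run above (a sketch; same image, no generator flag):
kubectl create deployment eclipseche --image=eclipse/che-server:nightly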

kubectl get pods
    NAME                                      READY   STATUS    RESTARTS   AGE
    brown-hyena-mysql-75f584d69d-rbfv4        1/1     Running   0          72m
    default-http-backend-5769f6bc66-z7jb4     1/1     Running   0          91m
    eclipseche-589954dc99-d4bxm               1/1     Running   0          6m13s
    nginx-ingress-microk8s-controller-p88nm   1/1     Running   0          91m

kubectl get svc
    NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
    brown-hyena-mysql      ClusterIP   10.152.184.38   <none>        3306/TCP   74m
    default-http-backend   ClusterIP   10.152.184.99   <none>        80/TCP     93m
    kubernetes             ClusterIP   10.152.184.1    <none>        443/TCP    99m

microk8s.kubectl describe pod eclipseche-589954dc99-d4bxm | grep "IP:"
    IP:  10.1.1.54

sudo apt-get install net-tools nmap

nmap 10.1.1.54 | grep open
    8080/tcp open  http-proxy
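
# The pod IP above is only reachable from the node itself; to reach Che from a browser outside
# the VM, expose the deployment as a NodePort service (a sketch):
kubectl expose deployment eclipseche --type=NodePort --port=8080
kubectl get svc eclipseche   # note the assigned node port, then browse http://<node-ip>:<node-port>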
