First, show the dangerous built-in role and role binding; cluster-admin grants unrestricted access to every resource in the cluster:
kubectl get clusterrole cluster-admin -o yaml
kubectl get clusterrolebinding cluster-admin -o yaml
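To see what an identity can actually do, kubectl's built-in authorization checks are handy (a quick sketch; --list dumps every permission held by the current identity):
# list everything the current identity may do
kubectl auth can-i --list
# or check a single permission explicitly
kubectl auth can-i delete nodes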
From my laptop:
# create a read-only Role and RoleBinding in the default namespace
cat <<EOF | kubectl apply -f -
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: read-only
  namespace: default
rules:
- apiGroups: [""]
  resources: ["*"]
  verbs: ["get", "watch", "list"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: read-only-binding
  namespace: default
roleRef:
  kind: Role
  name: read-only
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: Group
  name: read-only-group
  apiGroup: rbac.authorization.k8s.io
EOF
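Before involving EKS at all, the binding can be sanity-checked with kubectl's impersonation flags (the user name "anyone" is an arbitrary placeholder; impersonating a group requires also naming some user):
# should print "yes"
kubectl auth can-i get pods -n default --as anyone --as-group read-only-group
# should print "no" -- the role grants no write verbs
kubectl auth can-i create pods -n default --as anyone --as-group read-only-group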
From an EC2 instance launched with an IAM role:
# download and install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
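A quick sanity check that the binary installed correctly:
kubectl version --client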
# the aws cli can set up ~/.kube/config for the cluster
aws eks update-kubeconfig --name demo-cluster
# try to list the pods -- this should fail with an authorization error,
# since the instance's IAM role has no access entry on the cluster yet
kubectl get pods
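To see exactly which IAM principal kubectl is authenticating as, check the instance's caller identity. Note this prints an assumed-role ARN (arn:aws:sts::...:assumed-role/<role-name>/...), while the access entry below uses the underlying IAM role ARN:
aws sts get-caller-identity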
Return to the admin laptop and create an access entry for the instance's IAM role:
ACCOUNT_ID=$(aws sts get-caller-identity --query "Account" --output text)
# the principal must be the IAM role attached to the EC2 instance
# ("demo-ec2-role" is a placeholder; substitute your instance's role name)
aws eks create-access-entry \
  --cluster-name demo-cluster \
  --principal-arn arn:aws:iam::${ACCOUNT_ID}:role/demo-ec2-role \
  --kubernetes-groups read-only-group
# alternatively, with eksctl:
# eksctl create accessentry --cluster demo-cluster \
#   --principal-arn arn:aws:iam::${ACCOUNT_ID}:role/demo-ec2-role \
#   --kubernetes-groups read-only-group
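To confirm the entry exists and see its group mapping (access entries only take effect when the cluster's authentication mode is API or API_AND_CONFIG_MAP):
aws eks list-access-entries --cluster-name demo-cluster
aws eks describe-access-entry --cluster-name demo-cluster \
  --principal-arn arn:aws:iam::${ACCOUNT_ID}:role/demo-ec2-role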
Now go back to the EC2 instance and try listing the pods again:
# this should now succeed: the instance role maps to read-only-group
kubectl get pods
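To prove the access really is read-only, try a write from the same instance; both of these should be denied:
kubectl run test-pod --image=nginx   # should be rejected as forbidden
kubectl auth can-i create pods       # should print "no"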