Skip to content

Instantly share code, notes, and snippets.

View lioneltchami's full-sized avatar
🏠
Working from home

Lionel Tchami lioneltchami

🏠
Working from home
View GitHub Profile
# Inspect the image run by the pod belonging to the "nginx" Helm release.
$ kubectl get pod -l app.kubernetes.io/name=nginx -o jsonpath='{.items[0].spec.containers[0].image}'
nginx:1.21.6
# Show the release's revision history; revision 2 is the one currently deployed.
$ helm history nginx
REVISION UPDATED STATUS CHART APP VERSION DESCRIPTION
1 Mon Mar 14 12:07:33 2022 superseded nginx-0.1.0 1.0.0 Install complete
2 Mon Mar 14 12:08:25 2022 deployed nginx-0.1.0 1.0.0 Upgrade complete
# NOTE(review): the image reported below (1.21.5) differs from the first query
# (1.21.6) — presumably a `helm rollback`/`helm upgrade` ran between the two
# queries and was omitted from this transcript; confirm against the original.
$ kubectl get pod -l app.kubernetes.io/name=nginx -o jsonpath='{.items[0].spec.containers[0].image}'
nginx:1.21.5
# HorizontalPodAutoscaler scaling the "myapp" Deployment.
# Fixed: autoscaling/v2beta2 was deprecated and removed in Kubernetes 1.26;
# autoscaling/v2 (GA since 1.23) is a drop-in replacement for the fields used
# here (scaleTargetRef, minReplicas). Indentation restored — the pasted
# version was flattened, which is not valid YAML.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: myhpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: myapp
  minReplicas: 1
  # NOTE(review): this excerpt is truncated — the required maxReplicas field
  # and the metrics section are missing; confirm against the full manifest.
RUN JENKINS AS NON-ROOT USER
First, open the /etc/sysconfig/jenkins file and change the JENKINS_USER variable, making sure the user exists on your system.
By default, JENKINS_USER is set to the jenkins user, as shown below:
[root@Devops ~]# cat /etc/sysconfig/jenkins | grep JENKINS_USER
JENKINS_USER="jenkins"
Change JENKINS_USER to the huupv user.
// Declarative Jenkins pipeline that provisions an EKS cluster with eksctl.
pipeline {
agent any
stages {
stage('Cluster creation') {
steps {
// withAWS (Pipeline: AWS Steps plugin) scopes the 'aws-static' stored
// credentials to the commands inside this block.
withAWS(credentials:'aws-static') {
// NOTE(review): this excerpt is cut off inside the sh step — the eksctl
// arguments and all closing braces are missing from what is shown here.
sh '''
eksctl create cluster \
# IAM role for the Kubernetes EC2 instances. The trust (assume-role) policy
# in the heredoc below permits only the EC2 service to assume this role.
# NOTE(review): surrounding snippets use Terraform 0.11-era syntax; in 0.12+
# this heredoc would conventionally be written with jsonencode() instead.
resource "aws_iam_role" "kubernetes" {
name = "kubernetes"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [ { "Effect": "Allow", "Principal": { "Service": "ec2.amazonaws.com" }, "Action": "sts:AssumeRole" } ]
}
EOF
}
# Inline policy attached to the role above.
# NOTE(review): truncated in this excerpt — the policy body is missing.
resource "aws_iam_role_policy" "kubernetes" {
# Security group for the Kubernetes nodes, created in the cluster VPC.
resource "aws_security_group" "kubernetes" {
vpc_id = "${aws_vpc.kubernetes.id}"
name = "kubernetes"
# Allow all outbound
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
# NOTE(review): the closing braces of the egress block and of both resources
# are missing — the excerpt is cut off here.
# Generate Certificates
# Renders the kubernetes-csr.json template with the cluster's endpoint and
# node addresses. The explicit depends_on delays rendering until the API ELB
# and the etcd/controller/worker instances exist, so their DNS name and
# private IPs are available for interpolation.
data "template_file" "certificates" {
template = "${file("${path.module}/template/kubernetes-csr.json")}"
depends_on = ["aws_elb.kubernetes_api","aws_instance.etcd","aws_instance.controller","aws_instance.worker"]
# Terraform 0.11-style vars block (0.12+ writes this as `vars = { ... }`).
vars {
kubernetes_api_elb_dns_name = "${aws_elb.kubernetes_api.dns_name}"
kubernetes_cluster_dns = "${var.kubernetes_cluster_dns}"
etcd0_ip = "${aws_instance.etcd.0.private_ip}"
# NOTE(review): the "..." below is an elision in the pasted excerpt — the
# remaining etcd/controller entries and the closing braces are not shown.
...
controller0_ip = "${aws_instance.controller.0.private_ip}"
# Ansible tasks (excerpt): create etcd's config directory and copy the TLS
# certificates into it. Indentation restored — the pasted version was
# flattened, which is not valid YAML for nested task parameters.
# `become: true` runs each task with privilege escalation.
- name: Create etcd config dir
  file: path=/etc/etcd state=directory
  become: true

- name: Copy certificates
  copy:
    src: "{{ playbook_dir }}/../cert/{{ item }}"
    dest: "/etc/etcd/"
  become: true
  with_items:
  # NOTE(review): the with_items list is truncated in this excerpt; `loop`
  # is the modern replacement for with_items once the list is known.