main.yml Cheat Sheet
--------------------------------------------------------------------------------------------------------------------
# ===== LOCAL, NOT REMOTE SSH ===== ansible_connection=local
# control node ("operations center") that manages the other hosts over SSH
[clients]
remotecontrol01 ansible_host=10.217.50.13 ansible_connection=local
[masters]
k8s-master01 ansible_host=10.217.50.10 ansible_connection=ssh ansible_ssh_private_key_file=/home/vagrant/.ssh/id_rsa ansible_user=vagrant
[workers]
worker01 ansible_host=10.217.50.11 ansible_ssh_private_key_file=/home/vagrant/.ssh/id_rsa ansible_connection=ssh ansible_user=vagrant
worker02 ansible_host=10.217.50.12 ansible_ssh_private_key_file=/home/vagrant/.ssh/id_rsa ansible_connection=ssh ansible_user=vagrant
# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
[k8scluster:children]
masters
workers
[all:vars]
ansible_python_interpreter=/usr/bin/python3
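A quick way to verify that this inventory parses and the nodes are reachable (ad-hoc commands, assuming the file is saved as hosts):
ansible -i hosts all --list-hosts
ansible -i hosts k8scluster -m ping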
--------------------------------------------------------------------------------------------------------------------
#main.yml
---
- name: "OS fingerprinting"
  debug:
    msg:
      - "ansible_os_family: {{ hostvars[inventory_hostname].ansible_os_family }}"
      - "ansible_distribution: {{ hostvars[inventory_hostname].ansible_distribution }}"
      - "ansible_distribution_major_version: {{ hostvars[inventory_hostname].ansible_distribution_major_version }}"
      - "ansible_distribution_release: {{ hostvars[inventory_hostname].ansible_distribution_release }}"
      - "ansible_distribution_version: {{ hostvars[inventory_hostname].ansible_distribution_version }}"
- name: "Add /etc/hosts entries"
  include_tasks: hosts_entry.yml
- name: "Add /etc/resolv.conf entries"
  include_tasks: resolv_entry.yml
- name: "Install required packages (Debian family)"
  package:
    name: "{{ common_default_debian_reqs }}"
    state: present
  when: "ansible_os_family == 'Debian' and ansible_distribution in ['Debian','Ubuntu']"
- name: "Install required packages (RedHat family)"
  package:
    name: "{{ common_default_redhat_reqs }}"
    state: present
  when: "ansible_os_family == 'RedHat' and ansible_distribution in ['CentOS','Scientific']"
- name: "Install {{ ansible_distribution }}-{{ ansible_distribution_version }}"
  include_tasks: "{{ ansible_distribution }}_{{ ansible_distribution_version }}_install.yml"
#hosts_entry.yml
---
- name: "Add mappings to /etc/hosts"
  blockinfile:
    path: /etc/hosts
    block: |
      {{ item.ip }} {{ item.namelong }} {{ item.nameshort }}
    marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.nameshort }}"
  with_items:
    - { ip: 192.168.20.9, namelong: vg-compute-01.local, nameshort: vg-compute-01 }
    - { ip: 192.168.20.10, namelong: vg-compute-02.local, nameshort: vg-compute-02 }
    - { ip: 192.168.20.11, namelong: vg-compute-03.local, nameshort: vg-compute-03 }
#resolv_entry.yml
---
- name: "Add mappings to /etc/resolv.conf"
  # vagrant environment nodes
  blockinfile:
    path: /etc/resolv.conf
    block: |
      {{ item.name }} {{ item.ip }}
    marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.name }}"
  with_items:
    - { name: nameserver, ip: 8.8.8.8 }
#Ubuntu_20.04_install.yml
---
#https://docs.ansible.com/ansible/2.3/include_vars_module.html
- name: "Load a variable file based on the OS type, or a default if not found"
  include_vars: "{{ item }}"
  with_first_found:
    # most specific file first, otherwise it would never be reached
    - "{{ ansible_distribution }}_{{ ansible_distribution_version }}.yml"
    - "{{ ansible_os_family }}.yml"
    - default.yml
- name: "Install system packages"
  package:
    name: "{{ system_packages }}"
    use: "{{ ansible_pkg_mgr }}"
    state: present
--------------------------------------------------------------------------------------------------------------------
#main.yml - import tasks from a specific directory
- import_tasks: roles/vscode/install.yml
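import_tasks is resolved statically at parse time; the dynamic, runtime equivalent (same path, assuming the file exists) would be:
- include_tasks: roles/vscode/install.yml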
--------------------------------------------------------------------------------------------------------------------
#hosts file
[all:vars]
ansible_python_interpreter=/usr/bin/python3
#main.yml file
- hosts: masters
  become: yes
  vars:
    ansible_python_interpreter: /usr/bin/python3
--------------------------------------------------------------------------------------------------------------------
#problem
TASK [Gathering Facts] *********************************************************
[DEPRECATION WARNING]: Distribution Ubuntu 19.04 on host east-01 should use
/usr/bin/python3, but is using /usr/bin/python for backward compatibility with
prior Ansible releases. A future Ansible release will default to using the
discovered platform python for this host. See https://docs.ansible.com/ansible/
2.9/reference_appendices/interpreter_discovery.html for more information. This
feature will be removed in version 2.12. Deprecation warnings can be disabled
by setting deprecation_warnings=False in ansible.cfg.
ok: [east-01]
#vagrantfile
vpncluster.vm.box = "bento/ubuntu-19.04"
vpncluster.vm.provision "shell", inline: <<-SHELL
  apt-get update && apt-get install python3 -y
SHELL
vpncluster.vm.provision "ansible_local" do |ansible|
#main.yml file
- hosts: masters
  become: yes
  vars:
    ansible_python_interpreter: /usr/bin/python3
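The interpreter can also be pinned globally instead of per play; a minimal sketch via ansible.cfg (the setting exists since Ansible 2.8):
[defaults]
interpreter_python = /usr/bin/python3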
--------------------------------------------------------------------------------------------------------------------
# Example that prints return information from the previous task
# Debug Module Verbosity Control
# sudo ansible-playbook -i /vagrant/kube-cluster/hosts /vagrant/kube-cluster/05_dockers.yml -v   # shows tasks with verbosity: 1
# sudo ansible-playbook -i /vagrant/kube-cluster/hosts /vagrant/kube-cluster/05_dockers.yml -vv  # shows tasks with verbosity: 2
#
- shell: /usr/bin/uptime
  register: result
- debug:
    var: result
    verbosity: 2
- name: Display all variables/facts known for a host
  debug:
    var: hostvars[inventory_hostname]
    verbosity: 4
--------------------------------------------------------------------------------------------------------------------
@
- name: Reboot the machine
  shell: nohup bash -c "sleep 2s && shutdown -r now" &
- name: Wait for machine to come back
  wait_for_connection:
    timeout: 240
    delay: 20
@
- name: Reboot server
  command: /sbin/reboot
- name: Wait for the server to finish rebooting
  become: no
  local_action: wait_for host="{{ inventory_hostname }}" search_regex=OpenSSH port=22 timeout=300
@
- name: reboot host
  command: /usr/bin/systemd-run --on-active=10 /usr/bin/systemctl reboot
  async: 0
  poll: 0
- name: wait for host sshd
  local_action: wait_for host="{{ inventory_hostname }}" search_regex=OpenSSH port=22 timeout=300 delay=30
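Since Ansible 2.7 the built-in reboot module wraps the reboot-and-wait pattern above in a single task; a minimal sketch:
- name: Reboot and wait for the host to return
  reboot:
    reboot_timeout: 300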
--------------------------------------------------------------------------------------------------------------------
- name: Run gluster peer probe fedora01
  shell: gluster peer probe fedora01
  when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '27' and ansible_distribution == 'Fedora'"
  run_once: true
  delegate_to: fedora02
----------------------------------------------------------------------------------------------------------------------
fedora27 SELinux exception
- name: Install SELinux packages. This command has to be run under the root user
  dnf: name={{ item }} state=latest
  with_items:
    - libselinux-python
  become: yes
  when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '27' and ansible_distribution == 'Fedora'"
- name: Put SELinux in permissive mode, logging actions that would be blocked
  selinux:
    policy: targeted
    state: permissive
  become: yes
  when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '27' and ansible_distribution == 'Fedora'"
- name: Disable SELinux. This command has to be run under the root user
  selinux:
    state: disabled
  become: yes
  when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '27' and ansible_distribution == 'Fedora'"
- name: Uploading hosts file
  template:
    src: hosts.j2
    dest: /etc/hosts
    owner: root
    group: root
    mode: 0644
  become: yes
  when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '27' and ansible_distribution == 'Fedora'"
----------------------------------------------------------------------------------------------------------------------
To manage a system service (which requires root privileges) while connected as a non-root user, use become (this takes advantage of the fact that the default value of become_user is root):
- name: Ensure the httpd service is running
  service:
    name: httpd
    state: started
  become: yes
To run a command as the apache user:
- name: Run a command as the apache user
  command: somecommand
  become: yes
  become_user: apache
----------------------------------------------------------------------------------------------------------------------
- name: Download OFED-4.8 and extract
  unarchive:
    src: https://www.openfabrics.org/downloads/OFED/ofed-4.8/OFED-4.8.tgz
    dest: /home/vagrant
    remote_src: yes
----------------------------------------------------------------------------------------------------------------------
- lineinfile:
    path: /etc/sudoers
    regexp: '^Allow members of group sudo to execute any command '
    insertafter: '^#any command '
    line: 'stack ALL=(ALL) NOPASSWD:ALL'
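When editing /etc/sudoers it is safer to let lineinfile validate the result before replacing the file; a sketch using the module's validate parameter:
- lineinfile:
    path: /etc/sudoers
    line: 'stack ALL=(ALL) NOPASSWD:ALL'
    validate: '/usr/sbin/visudo -cf %s'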
----------------------------------------------------------------------------------------------------------------------
- debug:
    msg: "System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}"
  when: ansible_default_ipv4.gateway is defined
----------------------------------------------------------------------------------------------------------------------
- name: "shut down CentOS 7 systems"
command: reboot
when:
- ansible_distribution == "CentOS"
- ansible_distribution_major_version == "6"
- ansible_os_family == "RedHat"
- name: "shut down CentOS 7 systems"
command: reboot
when: "ansible_os_family == 'RedHat' and ansible_distribution == "CentOS"
- name: "shut down CentOS 7 systems"
command: reboot
when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7' and ansible_distribution == 'CentOS'"
----------------------------------------------------------------------------------------------------------------------
# transition from a generic .repo file to the yum_repository module
[local]
name=Local DVD repository
baseurl=http://inverse.ca/downloads/PacketFence/RHEL$releasever/devel/$basearch
gpgcheck=0
enabled=1
- name: Add repository
  yum_repository:
    name: packetfence
    description: PacketFence Repository
    baseurl: "http://inverse.ca/downloads/PacketFence/RHEL$releasever/devel/$basearch"
    gpgkey: "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-PACKETFENCE-CENTOS"
    enabled: yes
    gpgcheck: yes
----------------------------------------------------------------------------------------------------------------------
Example removing a repository and cleaning up the metadata cache:
- name: Remove repository (and clean up left-over metadata)
  yum_repository:
    name: docker-ce
    state: absent
  notify: yum-clean-metadata
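The notify above only works if a matching handler is defined; the yum_repository docs pair it with a handler along these lines:
handlers:
  - name: yum-clean-metadata
    command: yum clean metadata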
- name: 1. Check if EPEL repo is already configured
  stat: path={{ epel_repofile_path }}
  register: epel_repofile_result
- name: 2. Install EPEL repo
  yum:
    name: "{{ epel_repo_url }}"
    state: present
  register: result
  when: not epel_repofile_result.stat.exists
- name: 3. Import EPEL GPG key
  rpm_key:
    key: "{{ epel_repo_gpg_key_url }}"
    state: present
  when: not epel_repofile_result.stat.exists
- name: Add EPEL repository
  yum_repository:
    name: epel
    description: EPEL YUM repo
    baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
# Download and install EPEL for CentOS/RHEL version 7
- name: Download EPEL Repo - CentOS/RHEL 7
  get_url: url=http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm dest=/tmp/epel-release-latest-7.noarch.rpm
  when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7'"
- name: Install EPEL Repo - CentOS/RHEL 7
  command: rpm -ivh /tmp/epel-release-latest-7.noarch.rpm creates=/etc/yum.repos.d/epel.repo
  when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7'"
----------------------------------------------------------------------------------------------------------------------
- name: Enable EPEL
yum: name=epel-release state=present
- name: Create a Docker group
group: name=docker state=present
- name: Add the vagrant user to Docker group
user: name=vagrant groups=docker append=yes
- name: Install Docker
yum: name=docker state=present
- name: Enable and Start Docker Daemon
service: name=docker state=started enabled=yes
----------------------------------------------------------------------------------------------------------------------
- name: Add repository
  yum_repository:
    name: epel
    description: EPEL YUM repo
    baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
- name: "Add repository"
  yum_repository:
    name: Kubernetes # Unique repository ID
    description: Kubernetes repo
    baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
    enabled: yes
    gpgcheck: yes
    repo_gpgcheck: yes
    gpgkey:
      - https://packages.cloud.google.com/yum/doc/yum-key.gpg
      - https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
- name: Add repository
  yum_repository:
    name: docker-ce-stable # repository IDs must not contain spaces
    description: Docker CE Stable
    baseurl: https://download.docker.com/linux/centos/7/$basearch/stable
----------------------------------------------------------------------------------------------------------------------
- name: Set repos http://neuro.debian.net/lists/xenial.us-ca.full
  get_url:
    url: http://neuro.debian.net/lists/xenial.us-ca.full
    dest: /etc/apt/sources.list.d/neurodebian.sources.list
    mode: 0644
  when:
    - ansible_distribution == "Ubuntu"
    - ansible_os_family == "Debian"
- name: Receive repo keys hkp://pool.sks-keyservers.net:80
  command: apt-key adv --recv-keys --keyserver hkp://pool.sks-keyservers.net:80 0xA5D32F012649A5A9
  when:
    - ansible_distribution == "Ubuntu"
    - ansible_os_family == "Debian"
- name: Check if key exists
  command: apt-key fingerprint 0xA5D32F012649A5A9
  when:
    - ansible_distribution == "Ubuntu"
    - ansible_os_family == "Debian"
----------------------------------------------------------------------------------------------------------------------
- name: Download file with check (sha256)
  get_url:
    url: http://example.com/path/file.conf
    dest: /etc/foo.conf
    checksum: sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
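The expected checksum can be generated beforehand on any machine that already has the file (plain shell, not Ansible):
$ sha256sum file.conf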
----------------------------------------------------------------------------------------------------------------------
- name: Adding stack user
  user:
    name: stack
    shell: /bin/bash
    home: /opt/stack
    comment: "devstack installer sudo user"
----------------------------------------------------------------------------------------------------------------------
- name: Setting hostname
  shell: "hostnamectl set-hostname {{ inventory_hostname }}"
- name: Uploading hosts file
  template:
    src: hosts.j2
    dest: /etc/hosts
    owner: root
    group: root
    mode: 0644
---hosts.j2 jinja2 template file---
127.0.0.1 localhost.localdomain localhost
::1 localhost.localdomain localhost
{% for host in groups.docker_cluster %}
{{ hostvars[host].ansible_host }} {{ host }}
{% endfor %}
---hosts.j2 jinja2 template file---
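The shell call above works but always reports "changed"; the built-in hostname module is the idempotent form:
- name: Setting hostname (module form)
  hostname:
    name: "{{ inventory_hostname }}"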
----------------------------------------------------------------------------------------------------------------------
multiple files
- name: "Configure Elasticsearch"
  template:
    src: "{{ item }}.j2"
    dest: /etc/elasticsearch/{{ item }}
    owner: root
    group: elasticsearch
    mode: 0660
  with_items:
    - elasticsearch.yml
    - jvm.options
----------------------------------------------------------------------------------------------------------------------
- name: Install packages to allow apt to use a repository over HTTPS
  apt: name={{ item }} state=latest
  with_items:
    - build-essential
    - python
    - git
    - dh-autoreconf
    - debootstrap
    #- sudo
    #- man
    #- vim
    - autoconf
    - squashfs-tools
    - libtool
  when:
    - ansible_distribution == "Ubuntu"
    - ansible_os_family == "Debian"
----------------------------------------------------------------------------------------------------------------------
- name: Update all packages to the latest version
  apt:
    upgrade: dist
  when:
    - ansible_os_family == "Debian"
    - ansible_distribution == "Ubuntu"
- name: Run the equivalent of "apt-get update" as a separate step
  apt:
    update_cache: yes
  when:
    - ansible_distribution == "Ubuntu"
    - ansible_os_family == "Debian"
----------------------------------------------------------------------------------------------------------------------
- name: "Install Apache"
apt: name={{ item }} state=present
with_items:
- apache2
- name: "Turn on Apache and set it to run on boot"
service: name={{ item }} state=started enabled=yes
with_items:
- apache2
----------------------------------------------------------------------------------------------------------------------
#add handler
Task named "Install Nginx" notifies the handler named "Start Nginx":
cat installnginx_v1.yml
---
# hosts could have been "remote" or "all" as well
- hosts: local
  connection: local
  become: yes
  become_user: root
  tasks:
    - name: Install Nginx
      zypper:
        name: nginx
        state: present
        update_cache: yes
        disable_gpg_check: yes
      notify:
        - Start Nginx
  handlers:
    - name: Start Nginx
      service:
        name: nginx
        state: started
# task stops the service and notifies a handler to remove nginx
cat removenginx_v1.yml
---
# hosts could have been "remote" or "all" as well
- hosts: local
  connection: local
  become: yes
  become_user: root
  tasks:
    - name: Stop Nginx
      service:
        name: nginx
        state: stopped
      notify:
        - Remove Nginx
  handlers:
    - name: Remove Nginx
      zypper:
        name: nginx
        state: absent
        update_cache: yes
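Note that handlers fire only when the notifying task reports a change, and by default they run at the end of the play; to run any pending handlers immediately at a given point, insert:
- meta: flush_handlers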
-----------------------------------------------------------------------------------------------------
---
- hosts: localhost
  become: yes # all tasks will be executed with sudo
  become_user: root
  tasks:
    - name: Remove Java. The default CentOS version of Java is not compatible with Jenkins; Jenkins typically works best with a Sun implementation of Java
      yum:
        name: java
        state: absent
      when:
        - ansible_distribution == "Scientific"
        #- ansible_distribution_major_version == "6"
        - ansible_os_family == "RedHat"
-----------------------------------------------------------------------------------------------------
What if you want to reference a file based on the first filename found that matches given criteria, where some of the filenames are determined by variables?
- name: INTERFACES | Create Ansible header for /etc/network/interfaces
  template:
    src: "{{ item }}"
    dest: "/etc/foo.conf"
  with_first_found:
    - "{{ ansible_virtualization_type }}_foo.conf"
    - "default_foo.conf"
- name: resolve platform specific vars
  include_vars: "{{ item }}"
  with_first_found:
    - "{{ ansible_distribution }}-{{ ansible_distribution_release }}.yml"
    - "{{ ansible_distribution }}.yml"
    - "{{ ansible_os_family }}.yml"
- debug: var=hostvars['localhost']['ansible_distribution']
- debug: var=hostvars['localhost']['ansible_distribution_release']
- debug: var=hostvars['localhost']['ansible_os_family']
-----------------------------------------------------------------------------------------------------
- name: add yum repo conf
  become: yes
  become_user: root
  with_items:
    - f: ceph.repo
      d: /etc/yum.repos.d
      m: '0644' # quoted so YAML does not turn the octal mode into an integer
  template:
    src: '{{ item.f }}.j2'
    dest: '{{ item.d }}/{{ item.f }}'
    mode: '{{ item.m }}'
-----------------------------------------------------------------------------------------------------
- name: (CentOS) install EPEL
  become: yes
  become_user: root
  when: ansible_distribution in ['CentOS','RedHat']
  package: name=epel-release state=present
-----------------------------------------------------------------------------------------------------
- name: Create a new primary partition /dev/sdb1. This command has to be run under the root user
  parted:
    device: /dev/sdb
    number: 1
    state: present
  when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '27' and ansible_distribution == 'Fedora'"
- name: Create an xfs filesystem on /dev/sdb1. This command has to be run under the root user
  filesystem:
    fstype: xfs
    dev: /dev/sdb1
  when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '27' and ansible_distribution == 'Fedora'"
- name: Create the directory /data/brick1 if it doesn't exist
  file:
    path: /data/brick1
    state: directory
    mode: 0755
  when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '27' and ansible_distribution == 'Fedora'"
- name: Mount up device /dev/sdb1
  mount:
    path: /data/brick1
    src: /dev/sdb1
    fstype: xfs
    #opts: rw, suid, dev, exec, auto, nouser, async
    state: mounted
  when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '27' and ansible_distribution == 'Fedora'"
-----------------------------------------------------------------------------------------------------
- name: Make sure a docker service is running and enabled
systemd: state=started name=docker enabled=yes
-----------------------------------------------------------------------------------------------------
- name: Add specified repository into sources list
  apt_repository:
    repo: deb http://apt.kubernetes.io/ kubernetes-xenial main
    filename: kubernetes
    state: present
-----------------------------------------------------------------------------------------------------
- name: Start and enable firewalld
  systemd:
    name: firewalld.service
    state: started
    enabled: yes
- name: Allow qemu+tcp libvirtd listening port
  firewalld:
    port: 16509/tcp
    state: enabled
    permanent: yes
    immediate: yes
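firewalld also accepts named services instead of raw ports; a minimal sketch (the service name here is just an example):
- name: Allow a named firewalld service
  firewalld:
    service: https
    state: enabled
    permanent: yes
    immediate: yes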
-----------------------------------------------------------------------------------------------------
set an environment variable system-wide via /etc/profile
- name: "Set PATH for zeek"
  lineinfile: dest='/etc/profile' regexp='^#?\s*export PATH=/opt/zeek(.*)$' line='export PATH=/opt/zeek/bin:$PATH' state=present
-----------------------------------------------------------------------------------------------------
# defaults/main.yml
---
# defaults file for common
suricata_default_pre_reqs:
  - libpcre3
# tasks/main.yml
- name: "Ensure everything needed to build Suricata is installed"
  package:
    name: "{{ suricata_default_pre_reqs }}"
    state: present
-----------------------------------------------------------------------------------------------------
# vars/main.yml
---
# vars file for common
suricata_version: 3.1
# tasks/main.yml
- name: "Extract the latest version of Suricata into /tmp"
  unarchive:
    src: https://www.openinfosecfoundation.org/download/suricata-{{ suricata_version }}.tar.gz
    dest: /tmp
    remote_src: yes
-----------------------------------------------------------------------------------------------------
- name: "Touch a file"
file:
path: /tmp/fileA.conf
state: touch
- name: "Create a symbolic link"
file:
src: /tmp/fileA.conf
dest: /tmp/symbolA.conf
state: link
-----------------------------------------------------------------------------------------------------
tarball idempotence
- name: "Extract the latest version of Suricata into /tmp"
  unarchive:
    src: https://www.openinfosecfoundation.org/download/suricata-{{ suricata_version }}.tar.gz
    dest: /tmp
    creates: /tmp/suricata-{{ suricata_version }}/LICENSE
    remote_src: yes
- name: "Configure suricata"
  shell: ./configure >> /tmp/suricata_configure.out
  args:
    chdir: /tmp/suricata-{{ suricata_version }}
    creates: /tmp/suricata_configure.out
- name: "Make suricata"
  shell: make >> /tmp/suricata_make.out
  args:
    chdir: /tmp/suricata-{{ suricata_version }}
    creates: /tmp/suricata_make.out
- name: "Make install-full suricata"
  shell: make install-full >> /tmp/suricata_makeinstallfull.out
  args:
    chdir: /tmp/suricata-{{ suricata_version }}
    creates: /tmp/suricata_makeinstallfull.out
-----------------------------------------------------------------------------------------------------
- name: "nipper --version"
shell: nipper --version
register: nipperreg
- debug: msg={{ nipperreg.stdout }}
- debug: var=nipperreg
-----------------------------------------------------------------------------------------------------
- name: "Installing zeek from source"
git:
repo: 'https://github.com/zeek/zeek.git'
dest: /tmp/checkout
version: v3.0.0
update: no # Example just ensuring the repo checkout exists
- name: "Configure zeek"
shell: ./configure chdir=/tmp/checkout >> /tmp/zeek_configure.out
args:
chdir: /tmp
creates: zeek_configure.out
- name: "Make zeek"
shell: make chdir=/tmp/checkout >> /tmp/zeek_make.out
args:
chdir: /tmp
creates: zeek_make.out
- name: "Make install zeek"
shell: make install chdir=/tmp/checkout >> /tmp/zeek_makeinstall.out
args:
chdir: /tmp
creates: zeek_makeinstall.out
-----------------------------------------------------------------------------------------------------
- name: "Receive repo keys https://artifacts.elastic.co/GPG-KEY-elasticsearch"
apt_key:
url: https://artifacts.elastic.co/GPG-KEY-elasticsearch
state: present
- name: "Set repos https://artifacts.elastic.co/packages/5.x/apt"
apt_repository:
repo: "deb https://artifacts.elastic.co/packages/5.x/apt stable main"
state: present
filename: elastic-5.x
------------------------------------------------------------------------------------------------------
name: "Disable swapoff"
shell: swapoff -a
- name: "Disable swapoff permanently"
replace:
path: /etc/fstab
regexp: '^(\s*)([^#\n]+\s+)(\w+\s+)swap(\s+.*)$'
replace: '#\1\2\3swap\4'
backup: yes
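A minimal follow-up check to confirm swap really is off (swapon --show prints nothing when no swap is active):
- name: "Verify swap is off"
  command: swapon --show
  register: swap_status
  changed_when: false
- debug: var=swap_status.stdout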
------------------------------------------------------------------------------------------------------
set JAVA_HOME environment variable per play
- name: "Install moloch"
  hosts: vg-moloch-01
  gather_facts: true
  environment:
    JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
  become: yes
  roles:
    - moloch
------------------------------------------------------------------------------------------------------
set JAVA_HOME environment variable per task
tasks:
  - shell: echo JAVA_HOME is $JAVA_HOME
    environment:
      JAVA_HOME: /usr/java/jre1.8.0_51
    register: shellout
  - debug: var=shellout
-----------------------------------------------------------------------------------------------------
defaults/main.yml
# oracle java 13
oracle_java_apt_repository: 'ppa:linuxuprising/java'
oracle_java_deb_package: 'oracle-java13-installer'
oracle_java_license_version: 'shared/accepted-oracle-license-v1-2'
oracle_java_cache_valid_time: 3600
oracle_java_state: latest
oracle_java_debconf_package_default: 'oracle-java13-set-default'
oracle_java_apt_repository_key: "73C3DB2A"
tasks/main.yml
- name: "debian | ensure the apt repository key is present"
  apt_key:
    id: "{{ oracle_java_apt_repository_key }}"
    keyserver: keyserver.ubuntu.com
    state: present
- name: "debian | ensure the apt repository is present"
  apt_repository:
    repo: "{{ oracle_java_apt_repository }}"
    update_cache: yes
    state: present
- name: "debian | set license as accepted"
  debconf:
    name: "{{ oracle_java_deb_package }}"
    question: "{{ oracle_java_license_version }}"
    value: 'true'
    vtype: 'select'
- name: "debian | ensure Java is installed"
  apt:
    name: "{{ oracle_java_deb_package }}"
    state: "{{ oracle_java_state }}"
    cache_valid_time: "{{ oracle_java_cache_valid_time }}"
    update_cache: yes
- name: "debian | set Java version as default"
  apt:
    name: "{{ oracle_java_debconf_package_default }}"
    state: present
-----------------------------------------------------------------------------------------------------
- name: "Copy multiple files "
copy:
src: "{{ item }}"
dest: /etc/fooapp/
owner: root
mode: 600
with_items:
- dest_dir
- name: "Copy multiple files to multiple directories"
copy: src={{ item.0 }} dest={{ item.1 }}
with_together:
- [ 'file1', 'file2', 'file3' ]
- [ '/dir1/', '/dir2/', '/dir3/' ]
# role files directory
- copy:
src: test.txt
dest: '{{ some_destination }}'
-----------------------------------------------------------------------------------------------------
# keep verbose output but hide sensitive information
- hosts: all
  no_log: True
- name: secret task
  shell: /usr/bin/do_something --value={{ secret_value }}
  no_log: True
-----------------------------------------------------------------------------------------------------
#create list of users in a loop
$ cat create_user_loop.yaml
---
- name: "Create users in a loop"
  hosts: all
  become: true
  tasks:
    - user:
        name: "{{ item }}"
        state: present
      loop:
        - ansibleadm1
        - ansibleadm2
-----------------------------------------------------------------------------------------------------
#create list of groups in a loop
$ cat create_group_loop.yaml
---
- name: creating groups with loop
  hosts: all
  become: true
  tasks:
    - group:
        name: "{{ item }}"
        state: present
      loop:
        - group1
        - group2
--------------------------------------------------------------------------------------------------------------------
- name: "Create User"
# In most cases, you can use the short module name user even without specifying the collections: keyword.
# we recommend you use the FQCN for easy linking to the module documentation and to avoid conflicting with other collections that may have the same module name
ansible.builtin.user:
name: "{{ username }}"
state: present
shell: /bin/bash
password: "{{ userpass | password_hash('sha512') }}"
update_password: on_create
groups: "{{ super_group }}"
append: yes
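A quick way to see what the password_hash filter produces (ad-hoc command; the literal password is just an example):
$ ansible localhost -m debug -a "msg={{ 'mypassword' | password_hash('sha512') }}"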
-----------------------------------------------------------------------------------------------------
Problem:
{"changed": false, "msg": "Either user must exist or you must provide full path to key file in check mode"}
Fix:
- name: "Deploy SSH Public Key"
  authorized_key:
    user: "{{ username }}"
    state: present
    # no nested {{ }} inside a Jinja expression; concatenate instead
    key: "{{ lookup('file', '/home/' + username + '/.ssh/id_rsa.pub') }}"
  when: not ansible_check_mode
-----------------------------------------------------------------------------------------------------
#ansible-playbook -i inventory.ini create_user.yaml
#Creates the user account guest, which is a member of the admin group with sudo rights.
#A key pair with the same name as the account (guest.key and guest.key.pub) needs to be created and stored in the keyfiles directory before running the playbook.
---
- hosts: webservers
  remote_user: ubuntu
  become: true
  vars:
    users:
      - username: "guest"
        groups: "admin"
    remove_users:
      - "test"
  handlers:
    - name: "Restart sshd"
      service:
        name: "sshd"
        state: "restarted"
  tasks:
    - name: "Create user accounts"
      user:
        name: "{{ item.username }}"
        groups: "{{ item.groups }}"
        state: "present"
      with_items: "{{ users }}"
    - name: "Remove old user accounts in remove_users"
      user:
        name: "{{ item }}"
        state: "absent"
      with_items: "{{ remove_users }}"
    - name: "Add authorized keys"
      authorized_key:
        user: "{{ item.username }}"
        key: "{{ lookup('file', 'keyfiles/' + item.username + '.key.pub') }}"
      with_items: "{{ users }}"
    - name: "Allow admin users to sudo without a password"
      lineinfile:
        dest: "/etc/sudoers" # path: in version 2.3
        state: "present"
        regexp: "^%admin"
        line: "%admin ALL=(ALL) NOPASSWD: ALL"
    - name: "Disable root login via SSH"
      lineinfile:
        dest: "/etc/ssh/sshd_config"
        regexp: "^PermitRootLogin"
        line: "PermitRootLogin no"
      notify: "Restart sshd"
-----------------------------------------------------------------------------------------------------