$ sudo yum install python-setuptools
$ sudo easy_install pip
$ sudo yum install python-devel
$ sudo pip install ansible
Hosts file.
- Behavioural params
- Groups
- Groups of Groups
- Assign variables
- Scale out in multiple files
- Static/Dynamic file
**Behavioural params**
For example, to change the python interpreter path running on the remote machine you would update your inventory file as:
192.168.33.50 ansible_python_interpreter=/usr/bin/python2.7
.
├── production
│ ├── group_vars
│ │ ├── all (3)
│ │ └── db (2)
│ ├── host_vars
│ │ └── web1 (1)
│ └── inventory_prod
└── test
Order of Operation
- host_vars
host
- group_vars
group
- group_vars
all
When running ansible on hosts you have a few options to select groups:
- AND:
group1:&group2
Execute on hosts that only belong to both groups - OR:
group1:group2
Execute on all hosts in group1 and group2 - NOT:
!group1
Execute on all groups except for group1 - Wild Card
- Regular Expression
Order of Operation
$ANSIBLE_CONFIG
./ansible.cfg (working directory)
~/.ansible.cfg (home directory)
/etc/ansible/ansible.cfg
(Only created when installed via pip
or a package manager)
You can also override default config values on the fly by using export ANSIBLE_<config_name>
e.g.: export ANSIBLE_HOST_KEY_CHECKING=True
$ ansible 172.31.39.146 -i inventory -u user -m ping -k
SSH password:
172.31.39.146 | SUCCESS => {
"changed": false,
"ping": "pong"
}
$ ansible 172.31.39.146 -i inventory -u user -m command -a "/bin/ls" -k
SSH password:
172.31.39.146 | SUCCESS | rc=0 >>
ansible
Desktop
script.sh
VNCHOWTO
xrdp-chansrv.log
With the shell
module you can leverage the shell
environment e.g: environment variables, etc.
Create a user
$ ansible webservers -i inventory_prod -m user -a "name={{username}} password=12345" --sudo
web1 | SUCCESS => {
"changed": true,
"comment": "",
"createhome": true,
"group": 1002,
"home": "/home/all_username",
"name": "all_username",
"password": "NOT_LOGGING_PASSWORD",
"shell": "/bin/bash",
"state": "present",
"system": false,
"uid": 1002
}
# Install package
$ ansible webservers -i inventory_prod -m yum -a "name=httpd state=present" --sudo
# Start service
$ ansible webservers -i inventory_prod -m service -a "name=httpd state=started enabled=yes" --sudo
# Download repo file rpm
$ ansible dbservers -i inventory_prod -m get_url -a "dest=/tmp url=http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm"
# YUM local install
$ ansible dbservers -i inventory_prod -m yum -a "name=/tmp/mysql-community-release-el7-5.noarch.rpm state=present" --sudo
# Install package
$ ansible dbservers -i inventory_prod -m yum -a "name=mysql-server state=present" --sudo
# Start service
$ ansible dbservers -i inventory_prod -m service -a "name=mysqld state=started" --sudo
$ ansible web1 -i inventory_prod -m setup -a "filter=ansible_mounts"
web1 | SUCCESS => {
"ansible_facts": {
"ansible_mounts": [
{
"device": "/dev/xvda1",
"fstype": "xfs",
"mount": "/",
"options": "rw,seclabel,relatime,attr2,inode64,noquota",
"size_available": 15440408576,
"size_total": 21456445440,
"uuid": "0f790447-ebef-4ca0-b229-d0aa1985d57f"
},
{
"device": "/dev/xvda1",
"fstype": "xfs",
"mount": "/var/lib/docker/overlay",
"options": "rw,seclabel,relatime,attr2,inode64,noquota,bind",
"size_available": 15440408576,
"size_total": 21456445440,
"uuid": "0f790447-ebef-4ca0-b229-d0aa1985d57f"
}
]
},
"changed": false
}
$ ansible web1 -i inventory_prod -m setup --tree ./setup
.
└── setup
└── web1
---
- hosts: webservers
become: yes
gather_facts: False
tasks:
- name: Install apache webserver
yum: name=httpd state=present
- name: Start apache service
service: name=httpd state=started
- hosts: dbservers
become: yes
gather_facts: False
tasks:
- name: Fetch mysql repo rpm
get_url: dest=/tmp url=http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm
- name: Install mysql rpm
yum: name=/tmp/mysql-community-release-el7-5.noarch.rpm state=present
- name: Install mysql server
yum: name=mysql-server state=present
- name: Start mysql service
service: name=mysqld state=started
Run the above as ansible-playbook <playbook-name>.yaml
Override the host and group defined in the inventory file with command line arguments below:
$ ansible-playbook -i '52.64.66.6,' playbooks/bootstrap.yml --vault-password-file ~/vaultpassword --check --extra-vars 'ansible_ssh_user=ec2-user docker_host=52.64.66.6'
In the playbook you will need to set the hosts
argument to accept a variable to override the default group, in this case dockergroup
defined in the inventory file.
---
- hosts: "{{ docker_host | default('dockergroup') }}"
become: true
roles:
- docker
- name: Get files in a folder
find:
paths: "/var/www/html/wwwroot/somefolder/"
register: found_files
- name: Get latest file
set_fact:
latest_file: "{{ found_files.files | sort(attribute='mtime',reverse=true) | first }}"
- name: drop database
mysql_db: name={{ targetdbname }} state=absent
when: targetdeploydb == "new"
delegate_to: "{{ groups['dbserver'] | first }}"
run_once: true
- name: Coverity | Check port numbers are accessible from primary hosts
wait_for:
host: '{{ item.0 }}'
port: '{{ item.1 }}'
state: started # Port should be open
delay: 0 # No wait before first check (sec)
timeout: 3 # Stop checking after timeout (sec)
loop: "{{ coverity_host_ips_primary|product(ports)|list }}"
- name: Coverity | create primary hosts list with only IPs
set_fact:
coverity_host_ips_primary: "{{ coverity_host_ips_primary|default([]) + [item.value] }}"
loop: "{{ lookup('dict', coverity_endpoints_primary) }}"
Example:
backup_bsv_to_nfs_apps:
jira:
playbook: playbooks/jira_bsv_to_nfs.yml
bitbucket:
playbook: playbooks/bitbucket_bsv_to_nfs.yml
bamboo:
playbook: playbooks/bamboo_bsv_to_nfs.yml
confluence:
playbook: playbooks/confluence_bsv_to_nfs.yml
crowd:
playbook: playbooks/crowd_bsv_to_nfs.yml
- name: Create logrotate entry for app bsv to nfs backup scheduler
template:
src: ../templates/logrotate.d_bsv_to_nfs_backup_scheduler.j2
dest: "/etc/logrotate.d/{{ item.key }}_bsv_to_nfs_backup_scheduler"
owner: root
group: root
mode: 0644
with_items: "{{ lookup('dict', backup_bsv_to_nfs_apps) }}"
Value can be accessed as:
item.value.playbook
- name: Update config with dynamically retrieved master and slave IPs
set_fact:
keepalived_vrrp_config: "{{ ( keepalived_vrrp_config|default([]) ) + [ (item|combine( { 'master': {'SRC_IP': master_ip}, 'slave': {'SRC_IP': slave_ip } }, recursive=True)) ] }}"
loop: '{{ keepalived_vrrp }}'
The following is executed as the cloud-user
via Ansible. Although the second task uses become: true
it does not provide privileged access. This is because when using become
with become_user
(set globally) it switches the task execution user to cloud-user
.
- hosts: all
connection: local
gather_facts: no
become_user: 'cloud-user'
tasks:
- name: whoami
shell: 'whoami'
register: result
- debug:
var: result
- name: whoami
shell: 'whoami'
register: result
become: true
- debug:
var: result