Created September 9, 2020 23:55
--- ./tests/tests_change_fs.yml 2020-09-03 18:14:46.369832510 -0600
+++ ./tests/tests_change_fs.yml.lsrout 2020-09-09 17:48:40.217752539 -0600
@@ -4,9 +4,10 @@
vars:
storage_safe_mode: false
mount_location: '/opt/test1'
volume_size: '5g'
- fs_after: "{{ (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') | ternary('ext4', 'xfs') }}"
+ fs_after: "{{ (ansible_distribution == 'RedHat' and ansible_distribution_major_version\
+ \ == '6') | ternary('ext4', 'xfs') }}"
tasks:
- include_role:
name: linux-system-roles.storage
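
Note on the rewrap above: the reformatter wraps long double-quoted scalars with a trailing backslash. In YAML double-quoted style, a trailing \ escapes the line break and a leading "\ " on the continuation line re-inserts the separating space, so the wrapped scalar loads as exactly the same one-line string. A minimal sketch (illustrative key names, not from the patched files):

fs_short: "{{ (ansible_distribution == 'RedHat') | ternary('ext4', 'xfs') }}"
fs_folded: "{{ (ansible_distribution == 'RedHat') | ternary('ext4',\
  \ 'xfs') }}"
# fs_short and fs_folded load as identical strings
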
--- ./tests/test-verify-volume-encryption.yml 2020-09-03 18:14:46.369832510 -0600
+++ ./tests/test-verify-volume-encryption.yml.lsrout 2020-09-09 17:48:37.095733757 -0600
@@ -14,19 +14,22 @@
changed_when: false
- name: Verify the presence/absence of the LUKS device node
assert:
- that: "{{ storage_test_luks_dev.stat.exists and storage_test_luks_dev.stat.isblk
- if _storage_test_volume_present or storage_test_volume.type == 'disk'
- else
- not storage_test_luks_dev.stat.exists }}"
- msg: "Incorrect device node presence for volume {{ storage_test_volume.name }}"
+ that: "{{ storage_test_luks_dev.stat.exists and storage_test_luks_dev.stat.isblk\
+ \ if _storage_test_volume_present or storage_test_volume.type == 'disk' else\
+ \ not storage_test_luks_dev.stat.exists }}"
+ msg: "Incorrect device node presence for volume {{ storage_test_volume.name\
+ \ }}"
when: storage_test_volume.encryption
- name: Verify that the raw device is the same as the device if not encrypted
assert:
- that: "{{ (storage_test_volume._device != storage_test_volume._raw_device)|bool == (storage_test_volume.encryption|bool) }}"
- msg: "Encryption not managed correctly for volume {{ storage_test_volume.name }}: {{ (storage_test_volume._device != storage_test_volume._raw_device) }} {{ storage_test_volume.encryption|bool }}"
+ that: "{{ (storage_test_volume._device != storage_test_volume._raw_device)|bool\
+ \ == (storage_test_volume.encryption|bool) }}"
+ msg: "Encryption not managed correctly for volume {{ storage_test_volume.name\
+ \ }}: {{ (storage_test_volume._device != storage_test_volume._raw_device)\
+ \ }} {{ storage_test_volume.encryption|bool }}"
when: _storage_test_volume_present
- name: Make sure we got info about the LUKS volume if encrypted
assert:
@@ -35,57 +38,72 @@
when: _storage_test_volume_present and storage_test_volume.encryption
- name: Verify the LUKS volume's device type if encrypted
assert:
- that: "{{ storage_test_blkinfo.info[storage_test_volume._device].type == 'crypt' }}"
+ that: "{{ storage_test_blkinfo.info[storage_test_volume._device].type == 'crypt'\
+ \ }}"
when: _storage_test_volume_present and storage_test_volume.encryption
- name: Check LUKS version
assert:
- that: "{{ luks_dump.stdout|regex_search('^\\s+Version: ' + storage_test_volume.encryption_luks_version + '$') }}"
+ that: "{{ luks_dump.stdout|regex_search('^\\s+Version: ' + storage_test_volume.encryption_luks_version\
+ \ + '$') }}"
msg: "Wrong LUKS version for volume {{ storage_test_volume.name }}"
when: _storage_test_volume_present and storage_test_volume.encryption_luks_version
- name: Check LUKS key size
assert:
- that: "{{ luks_dump.stdout|regex_search('^\\s+Key: ' + storage_test_volume.encryption_key_size|string + ' bits$') }}"
+ that: "{{ luks_dump.stdout|regex_search('^\\s+Key: ' + storage_test_volume.encryption_key_size|string\
+ \ + ' bits$') }}"
msg: "Wrong key size for volume {{ storage_test_volume.name }}"
when: _storage_test_volume_present and storage_test_volume.encryption_key_size
- name: Check LUKS cipher
assert:
- that: "{{ luks_dump.stdout|regex_search('^\\s+Cipher: ' + storage_test_volume.encryption_cipher + '$') }}"
+ that: "{{ luks_dump.stdout|regex_search('^\\s+Cipher: ' + storage_test_volume.encryption_cipher\
+ \ + '$') }}"
msg: "Wrong key size for volume {{ storage_test_volume.name }}"
when: _storage_test_volume_present and storage_test_volume.encryption_cipher
- set_fact:
- _storage_test_expected_crypttab_entries: "{{ (storage_test_volume.encryption and _storage_test_volume_present)|ternary(1, 0) }}"
- _storage_test_crypttab_entries: "{{ storage_test_crypttab.stdout_lines|map('regex_search', '^' + storage_test_volume._device|basename + ' .*$')|select('string')|list }}"
- _storage_test_expected_crypttab_key_file: "{{ storage_test_volume.encryption_key or '-' }}"
+ _storage_test_expected_crypttab_entries: "{{ (storage_test_volume.encryption\
+ \ and _storage_test_volume_present)|ternary(1, 0) }}"
+ _storage_test_crypttab_entries: "{{ storage_test_crypttab.stdout_lines|map('regex_search',\
+ \ '^' + storage_test_volume._device|basename + ' .*$')|select('string')|list\
+ \ }}"
+ _storage_test_expected_crypttab_key_file: "{{ storage_test_volume.encryption_key\
+ \ or '-' }}"
- name: Check for /etc/crypttab entry
assert:
- that: "{{ _storage_test_crypttab_entries|length == _storage_test_expected_crypttab_entries|int }}"
- msg: "Incorrect number of crypttab entries found for volume {{ storage_test_volume.name }}"
+ that: "{{ _storage_test_crypttab_entries|length == _storage_test_expected_crypttab_entries|int\
+ \ }}"
+ msg: "Incorrect number of crypttab entries found for volume {{ storage_test_volume.name\
+ \ }}"
- name: Validate the format of the crypttab entry
assert:
that: "{{ _storage_test_crypttab_entries[0].split()|length >= 3 }}"
- msg: "Incorrectly formatted crypttab line for volume {{ storage_test_volume.name }}"
+ msg: "Incorrectly formatted crypttab line for volume {{ storage_test_volume.name\
+ \ }}"
when: _storage_test_expected_crypttab_entries|int == 1
- name: Check backing device of crypttab entry
assert:
- that: "{{ _storage_test_crypttab_entries[0].split()[1] == storage_test_volume._raw_device }}"
- msg: "Incorrect backing device in crypttab entry for volume {{ storage_test_volume.name }}"
+ that: "{{ _storage_test_crypttab_entries[0].split()[1] == storage_test_volume._raw_device\
+ \ }}"
+ msg: "Incorrect backing device in crypttab entry for volume {{ storage_test_volume.name\
+ \ }}"
when: _storage_test_expected_crypttab_entries|int == 1
- name: Check key file of crypttab entry
assert:
- that: "{{ _storage_test_crypttab_entries[0].split()[2] == _storage_test_expected_crypttab_key_file }}"
- msg: "Incorrect key file in crypttab entry for volume {{ storage_test_volume.name }}"
+ that: "{{ _storage_test_crypttab_entries[0].split()[2] == _storage_test_expected_crypttab_key_file\
+ \ }}"
+ msg: "Incorrect key file in crypttab entry for volume {{ storage_test_volume.name\
+ \ }}"
when: _storage_test_expected_crypttab_entries|int == 1
- set_fact:
- _storage_test_expected_crypttab_entries: null
- _storage_test_crypttab_entries: null
- _storage_test_expected_crypttab_key_file: null
+ _storage_test_expected_crypttab_entries:
+ _storage_test_crypttab_entries:
+ _storage_test_expected_crypttab_key_file:
--- ./tests/tests_change_fs_use_partitions.yml 2020-09-03 18:14:46.369832510 -0600
+++ ./tests/tests_change_fs_use_partitions.yml.lsrout 2020-09-09 17:48:39.181746307 -0600
@@ -5,9 +5,10 @@
storage_safe_mode: false
storage_use_partitions: true
mount_location: '/opt/test1'
volume_size: '5g'
- fs_type_after: "{{ 'ext3' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') else 'ext4' }}"
+ fs_type_after: "{{ 'ext3' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version\
+ \ == '6') else 'ext4' }}"
tasks:
- include_role:
name: linux-system-roles.storage
--- ./tests/verify-role-results.yml 2020-09-03 18:14:46.370832516 -0600
+++ ./tests/verify-role-results.yml.lsrout 2020-09-09 17:48:35.303722976 -0600
@@ -11,9 +11,9 @@
#
# Collect some information about the current state of the system.
#
- name: Collect info about the volumes.
- blockdev_info:
+ fedora.system_roles.blockdev_info:
register: storage_test_blkinfo
changed_when: false
- name: Read the /etc/fstab file for volume existence
@@ -37,9 +37,9 @@
when: _storage_pools_list is defined and _storage_pools_list | length > 0
- name: Clean up variable namespace
set_fact:
- storage_test_pool: null
+ storage_test_pool:
#
# Verify standalone volumes.
#
@@ -54,8 +54,8 @@
# Clean up.
#
- name: Clean up variable namespace
set_fact:
- storage_test_fstab: null
- storage_test_crypttab: null
- storage_test_blkinfo: null
- storage_test_volume: null
+ storage_test_fstab:
+ storage_test_crypttab:
+ storage_test_blkinfo:
+ storage_test_volume:
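
Note on the module renames in this file (blockdev_info becomes fedora.system_roles.blockdev_info; find_unused_disk, blivet, and resolve_blockdev get the same treatment below): the tests are being switched to fully qualified collection names. A hedged alternative sketch, assuming the fedora.system_roles collection is installed, is the play-level collections keyword, which lets tasks keep the short names:

- hosts: all
  collections:
    - fedora.system_roles
  tasks:
    - name: Collect info about the volumes.
      blockdev_info:    # resolves to fedora.system_roles.blockdev_info
      register: storage_test_blkinfo
      changed_when: false

FQCNs are generally preferred, though, since they stay unambiguous when multiple collections ship a module with the same name.
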
--- ./tests/tests_fatals_raid_pool.yml 2020-09-03 18:14:46.369832510 -0600
+++ ./tests/tests_fatals_raid_pool.yml.lsrout 2020-09-09 17:48:37.930738780 -0600
@@ -31,17 +31,17 @@
type: lvm
raid_level: "fail"
state: present
volumes:
- - name: lv1
- size: "{{ volume1_size }}"
- mount_point: "{{ mount_location1 }}"
- - name: lv2
- size: "{{ volume2_size }}"
- mount_point: "{{ mount_location2 }}"
- - name: lv3
- size: "{{ volume3_size }}"
- mount_point: "{{ mount_location3 }}"
+ - name: lv1
+ size: "{{ volume1_size }}"
+ mount_point: "{{ mount_location1 }}"
+ - name: lv2
+ size: "{{ volume2_size }}"
+ mount_point: "{{ mount_location2 }}"
+ - name: lv3
+ size: "{{ volume3_size }}"
+ mount_point: "{{ mount_location3 }}"
- name: unreachable task
fail:
msg: UNREACH
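
Note on the re-indentation above: both spellings of a block sequence under a mapping key are valid YAML and parse to the same structure, so this hunk is purely stylistic. A minimal sketch (illustrative keys):

zero_indented:
- name: lv1
indented:
  - name: lv1
# zero_indented and indented load as equal one-element lists
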
--- ./tests/test-verify-volume-fstab.yml 2020-09-03 18:14:46.369832510 -0600
+++ ./tests/test-verify-volume-fstab.yml.lsrout 2020-09-09 17:48:39.680749309 -0600
@@ -1,29 +1,40 @@
---
- name: Set some variables for fstab checking
set_fact:
- storage_test_fstab_id_matches: "{{ storage_test_fstab.stdout_lines|map('regex_search', '^' + storage_test_volume._mount_id + ' ')|select('string')|list }}"
- storage_test_fstab_mount_point_matches: "{{ storage_test_fstab.stdout_lines|map('regex_search', ' +' + storage_test_volume.mount_point + ' +')|select('string')|list if (storage_test_volume.mount_point|d('none',true) != 'none') else [] }}"
- storage_test_fstab_expected_id_matches: "{{ 1 if (_storage_test_volume_present and (storage_test_volume.mount_point|d('none',true) != 'none' or storage_test_volume.fs_type == 'swap')) else 0 }}"
- storage_test_fstab_expected_mount_point_matches: "{{ 1 if (_storage_test_volume_present and storage_test_volume.mount_point and storage_test_volume.mount_point.startswith('/')) else 0 }}"
+ storage_test_fstab_id_matches: "{{ storage_test_fstab.stdout_lines|map('regex_search',\
+ \ '^' + storage_test_volume._mount_id + ' ')|select('string')|list }}"
+ storage_test_fstab_mount_point_matches: "{{ storage_test_fstab.stdout_lines|map('regex_search',\
+ \ ' +' + storage_test_volume.mount_point + ' +')|select('string')|list if\
+ \ (storage_test_volume.mount_point|d('none',true) != 'none') else [] }}"
+ storage_test_fstab_expected_id_matches: "{{ 1 if (_storage_test_volume_present\
+ \ and (storage_test_volume.mount_point|d('none',true) != 'none' or storage_test_volume.fs_type\
+ \ == 'swap')) else 0 }}"
+ storage_test_fstab_expected_mount_point_matches: "{{ 1 if (_storage_test_volume_present\
+ \ and storage_test_volume.mount_point and storage_test_volume.mount_point.startswith('/'))\
+ \ else 0 }}"
# device id
- name: Verify that the device identifier appears in /etc/fstab
assert:
- that: "{{ storage_test_fstab_id_matches|length == storage_test_fstab_expected_id_matches|int }}"
+ that: "{{ storage_test_fstab_id_matches|length == storage_test_fstab_expected_id_matches|int\
+ \ }}"
msg: "Expected device identifier not found in /etc/fstab."
when: _storage_test_volume_present
# mount point
- name: Verify the fstab mount point
assert:
- that: "{{ storage_test_fstab_mount_point_matches|length == storage_test_fstab_expected_mount_point_matches|int }}"
- msg: "Expected number ({{ storage_test_fstab_expected_mount_point_matches }}) of entries with volume '{{ storage_test_volume.name }}' mount point not found in /etc/fstab."
+ that: "{{ storage_test_fstab_mount_point_matches|length == storage_test_fstab_expected_mount_point_matches|int\
+ \ }}"
+ msg: "Expected number ({{ storage_test_fstab_expected_mount_point_matches }})\
+ \ of entries with volume '{{ storage_test_volume.name }}' mount point not\
+ \ found in /etc/fstab."
# todo: options
- name: Clean up variables
set_fact:
- storage_test_fstab_id_matches: null
- storage_test_fstab_mount_point_matches: null
- storage_test_fstab_expected_id_matches: null
- storage_test_fstab_expected_mount_point_matches: null
+ storage_test_fstab_id_matches:
+ storage_test_fstab_mount_point_matches:
+ storage_test_fstab_expected_id_matches:
+ storage_test_fstab_expected_mount_point_matches:
--- ./tests/tests_null_raid_pool.yml 2020-09-03 18:14:46.370832516 -0600
+++ ./tests/tests_null_raid_pool.yml.lsrout 2020-09-09 17:48:37.211734455 -0600
@@ -30,11 +30,11 @@
type: lvm
raid_level: "null"
state: present
volumes:
- - name: lv1
- size: "{{ volume1_size }}"
- mount_point: "{{ mount_location1 }}"
+ - name: lv1
+ size: "{{ volume1_size }}"
+ mount_point: "{{ mount_location1 }}"
- name: get existing raids (after run)
command: "cat /proc/mdstat"
register: storage_test_mdstat2
@@ -51,13 +51,13 @@
type: lvm
raid_level: "null"
state: absent
volumes:
- - name: lv1
- size: "{{ volume1_size }}"
- mount_point: "{{ mount_location1 }}"
+ - name: lv1
+ size: "{{ volume1_size }}"
+ mount_point: "{{ mount_location1 }}"
- name: compare mdstat results
assert:
that:
- - storage_test_mdstat1.stdout == storage_test_mdstat2.stdout
+ - storage_test_mdstat1.stdout == storage_test_mdstat2.stdout
msg: "Raid created when it should not be"
--- ./tests/get_unused_disk.yml 2020-09-03 18:14:46.368832504 -0600
+++ ./tests/get_unused_disk.yml.lsrout 2020-09-09 17:48:36.159728126 -0600
@@ -1,7 +1,7 @@
---
- name: Find unused disks in the system
- find_unused_disk:
+ fedora.system_roles.find_unused_disk:
min_size: "{{ min_size | default(omit) }}"
max_return: "{{ max_return|default(omit) }}"
register: unused_disks_return
--- ./tests/tests_disk_errors.yml 2020-09-03 18:14:46.369832510 -0600
+++ ./tests/tests_disk_errors.yml.lsrout 2020-09-09 17:48:36.698731368 -0600
@@ -74,11 +74,10 @@
msg: "Role has not failed when it should have"
- name: Verify the output of the duplicate volumes test
assert:
- that: "blivet_output.failed and
- 'multiple volumes with the same name:' in blivet_output.msg and
- not blivet_output.changed"
+ that: "blivet_output.failed and 'multiple volumes with the same name:'\
+ \ in blivet_output.msg and not blivet_output.changed"
msg: "Unexpected behavior w/ multiple disk volumes using the same name"
- name: Create a file system on disk
include_role:
@@ -120,11 +119,10 @@
msg: "Role has not failed when it should have"
- name: Verify the output
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
- not blivet_output.changed"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('cannot remove\
+ \ existing formatting on volume.*in safe mode') and not blivet_output.changed"
msg: "Unexpected behavior w/ existing data on specified disks"
- name: Unmount file system
include_role:
@@ -161,11 +159,10 @@
msg: "Role has not failed when it should have"
- name: Verify the output
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
- not blivet_output.changed"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('cannot remove\
+ \ existing formatting on volume.*in safe mode') and not blivet_output.changed"
msg: "Unexpected behavior w/ existing data on specified disks"
- name: Remount file system
include_role:
@@ -184,15 +181,15 @@
register: stat_r
- name: assert file presence
assert:
- that:
- stat_r.stat.isreg is defined and stat_r.stat.isreg
+ that: stat_r.stat.isreg is defined and stat_r.stat.isreg
msg: "data lost!"
- name: Test for correct handling of safe_mode
block:
- - name: Try to create a partition pool on the disk already containing a file system in safe_mode
+ - name: Try to create a partition pool on the disk already containing a
+ file system in safe_mode
include_role:
name: linux-system-roles.storage
vars:
storage_pools:
@@ -212,16 +209,17 @@
msg: "Role has not failed when it should have"
- name: Verify the output
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
- not blivet_output.changed"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('cannot\
+ \ remove existing formatting and/or devices on disk.*in safe mode')\
+ \ and not blivet_output.changed"
msg: "Unexpected behavior w/ existing data on specified disks"
- name: Test for correct handling of safe_mode with existing filesystem
block:
- - name: Try to create LVM pool on disk that already belongs to an existing filesystem
+ - name: Try to create LVM pool on disk that already belongs to an existing
+ filesystem
include_role:
name: linux-system-roles.storage
vars:
storage_pools:
@@ -241,11 +239,11 @@
msg: "Role has not failed when it should have"
- name: Verify the output
assert:
- that: "{{ blivet_output.failed and
- blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
- not blivet_output.changed }}"
+ that: "{{ blivet_output.failed and blivet_output.msg|regex_search('cannot\
+ \ remove existing formatting and/or devices on disk.*in safe mode')\
+ \ and not blivet_output.changed }}"
msg: "Unexpected behavior w/ existing data on specified disks"
- name: stat the file
stat:
@@ -253,13 +251,13 @@
register: stat_r
- name: assert file presence
assert:
- that:
- stat_r.stat.isreg is defined and stat_r.stat.isreg
+ that: stat_r.stat.isreg is defined and stat_r.stat.isreg
msg: "data lost!"
- - name: Create a partition pool on the disk already containing a file system w/o safe_mode
+ - name: Create a partition pool on the disk already containing a file system
+ w/o safe_mode
include_role:
name: linux-system-roles.storage
vars:
storage_safe_mode: false
--- ./tests/tests_luks.yml 2020-09-03 18:14:46.369832510 -0600
+++ ./tests/tests_luks.yml.lsrout 2020-09-09 17:48:38.935744827 -0600
@@ -43,11 +43,10 @@
msg: "Role has not failed when it should have"
- name: Verify the output of the keyless luks test
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('encrypted volume.*missing key') and
- not blivet_output.changed"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('encrypted\
+ \ volume.*missing key') and not blivet_output.changed"
msg: "Unexpected behavior w/ encrypted pool w/ no key"
# encrypted disk volume
- name: Create an encrypted disk volume w/ default fs
@@ -125,11 +124,10 @@
msg: "Role has not failed when it should have"
- name: Verify the output of the keyless luks test
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('encrypted volume.*missing key') and
- not blivet_output.changed"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('encrypted\
+ \ volume.*missing key') and not blivet_output.changed"
msg: "Unexpected behavior w/ encrypted pool w/ no key"
- name: Create an encrypted partition volume w/ default fs
include_role:
@@ -238,11 +236,10 @@
msg: "Role has not failed when it should have"
- name: Verify the output of the keyless luks test
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('encrypted volume.*missing key') and
- not blivet_output.changed"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('encrypted\
+ \ volume.*missing key') and not blivet_output.changed"
msg: "Unexpected behavior w/ encrypted pool w/ no key"
- name: Create an encrypted lvm volume w/ default fs
include_role:
--- ./tests/test-verify-volume-fs.yml 2020-09-03 18:14:46.369832510 -0600
+++ ./tests/test-verify-volume-fs.yml.lsrout 2020-09-09 17:48:42.309765125 -0600
@@ -2,12 +2,14 @@
# type
- name: Verify fs type
assert:
- that: "{{ storage_test_blkinfo.info[storage_test_volume._device].fstype == storage_test_volume.fs_type }}"
+ that: "{{ storage_test_blkinfo.info[storage_test_volume._device].fstype == storage_test_volume.fs_type\
+ \ }}"
when: storage_test_volume.fs_type and _storage_test_volume_present
# label
- name: Verify fs label
assert:
- that: "{{ storage_test_blkinfo.info[storage_test_volume._device].label == storage_test_volume.fs_label }}"
+ that: "{{ storage_test_blkinfo.info[storage_test_volume._device].label == storage_test_volume.fs_label\
+ \ }}"
when: _storage_test_volume_present
--- ./tests/tests_luks_pool.yml 2020-09-03 18:14:46.369832510 -0600
+++ ./tests/tests_luks_pool.yml.lsrout 2020-09-09 17:48:42.605766906 -0600
@@ -48,11 +48,10 @@
msg: "Role has not failed when it should have"
- name: Verify the output of the keyless luks test
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('encrypted pool.*missing key') and
- not blivet_output.changed"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('encrypted\
+ \ pool.*missing key') and not blivet_output.changed"
msg: "Unexpected behavior w/ encrypted pool w/ no key"
- name: Test key file handling
block:
--- ./tests/verify-pool-member-crypttab.yml 2020-09-03 18:14:46.370832516 -0600
+++ ./tests/verify-pool-member-crypttab.yml.lsrout 2020-09-09 17:48:36.075727620 -0600
@@ -1,28 +1,37 @@
- set_fact:
- _storage_test_crypttab_entries: "{{ storage_test_crypttab.stdout_lines|map('regex_search', '^' + _storage_test_pool_member_path|basename + ' .*$')|select('string')|list }}"
+ _storage_test_crypttab_entries: "{{ storage_test_crypttab.stdout_lines|map('regex_search',\
+ \ '^' + _storage_test_pool_member_path|basename + ' .*$')|select('string')|list\
+ \ }}"
- name: Check for /etc/crypttab entry
assert:
- that: "{{ _storage_test_crypttab_entries|length == _storage_test_expected_crypttab_entries|int }}"
- msg: "Incorrect number of crypttab entries found for pool {{ storage_test_pool.name }} member {{ _storage_test_pool_member_path|basename }}"
+ that: "{{ _storage_test_crypttab_entries|length == _storage_test_expected_crypttab_entries|int\
+ \ }}"
+ msg: "Incorrect number of crypttab entries found for pool {{ storage_test_pool.name\
+ \ }} member {{ _storage_test_pool_member_path|basename }}"
- name: Validate the format of the crypttab entry
assert:
that: "{{ _storage_test_crypttab_entries[0].split()|length >= 3 }}"
- msg: "Incorrectly formatted crypttab line for volume {{ storage_test_pool.name }} member {{ _storage_test_pool_member_path|basename }}"
+ msg: "Incorrectly formatted crypttab line for volume {{ storage_test_pool.name\
+ \ }} member {{ _storage_test_pool_member_path|basename }}"
when: _storage_test_expected_crypttab_entries|int == 1
- name: Check backing device of crypttab entry
assert:
- that: "{{ _storage_test_crypttab_entries[0].split()[1] == storage_test_volume._raw_device }}"
- msg: "Incorrect backing device in crypttab entry for volume {{ storage_test_volume.name }}"
+ that: "{{ _storage_test_crypttab_entries[0].split()[1] == storage_test_volume._raw_device\
+ \ }}"
+ msg: "Incorrect backing device in crypttab entry for volume {{ storage_test_volume.name\
+ \ }}"
when: false and _storage_test_expected_crypttab_entries|int == 1
- name: Check key file of crypttab entry
assert:
- that: "{{ _storage_test_crypttab_entries[0].split()[2] == _storage_test_expected_crypttab_key_file }}"
- msg: "Incorrect key file in crypttab entry for volume {{ storage_test_pool.name }} member {{ _storage_test_pool_member_path|basename }}"
+ that: "{{ _storage_test_crypttab_entries[0].split()[2] == _storage_test_expected_crypttab_key_file\
+ \ }}"
+ msg: "Incorrect key file in crypttab entry for volume {{ storage_test_pool.name\
+ \ }} member {{ _storage_test_pool_member_path|basename }}"
when: _storage_test_expected_crypttab_entries|int == 1
- set_fact:
- _storage_test_crypttab_entries: null
+ _storage_test_crypttab_entries:
--- ./tests/test-verify-volume-mount.yml 2020-09-03 18:14:46.369832510 -0600
+++ ./tests/test-verify-volume-mount.yml.lsrout 2020-09-09 17:48:37.825738149 -0600
@@ -4,43 +4,56 @@
# MDRaid devices paths are returned as a symlinks. But sometimes we need their targets.
#
- name: Get expected mount device based on device type
set_fact:
- storage_test_device_path: "{{ storage_test_volume._kernel_device if _storage_test_volume_present and not storage_test_volume.encryption and storage_test_volume.raid_level else storage_test_volume._device }}"
+ storage_test_device_path: "{{ storage_test_volume._kernel_device if _storage_test_volume_present\
+ \ and not storage_test_volume.encryption and storage_test_volume.raid_level\
+ \ else storage_test_volume._device }}"
- name: Set some facts
set_fact:
# json_query(...) used instead of "|selectattr('device', 'equalto', storage_test_volume._device)|list"
# as that expression wouldn't work with Jinja versions <2.8
- storage_test_mount_device_matches: "{{ ansible_mounts|json_query('[?device==`\"{}\"`]'.format(storage_test_device_path))}}"
- storage_test_mount_point_matches: "{{ ansible_mounts|json_query('[?mount==`\"{}\"`]'.format(storage_test_volume.mount_point))}}"
- storage_test_mount_expected_match_count: "{{ 1 if _storage_test_volume_present and storage_test_volume.mount_point and storage_test_volume.mount_point.startswith('/') else 0 }}"
- storage_test_swap_expected_matches: "{{ 1 if _storage_test_volume_present and storage_test_volume.fs_type == 'swap' else 0 }}"
+ storage_test_mount_device_matches: "{{ ansible_mounts|json_query('[?device==`\"\
+ {}\"`]'.format(storage_test_device_path))}}"
+ storage_test_mount_point_matches: "{{ ansible_mounts|json_query('[?mount==`\"\
+ {}\"`]'.format(storage_test_volume.mount_point))}}"
+ storage_test_mount_expected_match_count: "{{ 1 if _storage_test_volume_present\
+ \ and storage_test_volume.mount_point and storage_test_volume.mount_point.startswith('/')\
+ \ else 0 }}"
+ storage_test_swap_expected_matches: "{{ 1 if _storage_test_volume_present and\
+ \ storage_test_volume.fs_type == 'swap' else 0 }}"
#
# Verify mount presence.
#
- name: Verify the current mount state by device
assert:
- that: "{{ storage_test_mount_device_matches|length == storage_test_mount_expected_match_count|int }}"
- msg: "Found unexpected mount state for volume '{{ storage_test_volume.name }}' device"
+ that: "{{ storage_test_mount_device_matches|length == storage_test_mount_expected_match_count|int\
+ \ }}"
+ msg: "Found unexpected mount state for volume '{{ storage_test_volume.name }}'\
+ \ device"
when: _storage_test_volume_present and storage_test_volume.mount_point
#
# Verify mount directory.
#
- name: Verify the current mount state by mount point
assert:
- that: "{{ storage_test_mount_point_matches|length == storage_test_mount_expected_match_count|int }}"
- msg: "Found unexpected mount state for volume '{{ storage_test_volume.name }}' mount point"
+ that: "{{ storage_test_mount_point_matches|length == storage_test_mount_expected_match_count|int\
+ \ }}"
+ msg: "Found unexpected mount state for volume '{{ storage_test_volume.name }}'\
+ \ mount point"
#
# Verify mount fs type.
#
- name: Verify the mount fs type
assert:
- that: "{{ storage_test_mount_point_matches[0].fstype == storage_test_volume.fs_type }}"
- msg: "Found unexpected mount state for volume '{{ storage_test_volume.name }} fs type"
+ that: "{{ storage_test_mount_point_matches[0].fstype == storage_test_volume.fs_type\
+ \ }}"
+ msg: "Found unexpected mount state for volume '{{ storage_test_volume.name }}\
+ \ fs type"
when: storage_test_mount_expected_match_count|int == 1
#
# Verify swap status.
@@ -55,9 +68,11 @@
when: storage_test_volume.fs_type == "swap"
- name: Verify swap status
assert:
- that: "{{ storage_test_swaps.stdout|regex_findall('^' + storage_test_sys_node.stdout + ' ', multiline=True)|list|length|int == storage_test_swap_expected_matches|int }}"
+ that: "{{ storage_test_swaps.stdout|regex_findall('^' + storage_test_sys_node.stdout\
+ \ + ' ', multiline=True)|list|length|int == storage_test_swap_expected_matches|int\
+ \ }}"
msg: "Unexpected number of matching active swaps"
when: storage_test_volume.fs_type == "swap"
#
@@ -65,10 +80,10 @@
#
- name: Unset facts
set_fact:
- storage_test_mount_device_matches: null
- storage_test_mount_point_matches: null
- storage_test_mount_expected_match_count: null
- storage_test_swap_expected_matches: null
- storage_test_sys_node: null
- storage_test_swaps: null
+ storage_test_mount_device_matches:
+ storage_test_mount_point_matches:
+ storage_test_mount_expected_match_count:
+ storage_test_swap_expected_matches:
+ storage_test_sys_node:
+ storage_test_swaps:
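
Note on the json_query wrapping in this file: the expressions use JMESPath backtick-quoted literals, which is why the escaped quotes (\"{}\") must survive the reflow intact. A hedged usage sketch with an illustrative device path (requires the jmespath Python library on the controller):

- name: Select mount entries for one device via JMESPath (example only)
  debug:
    msg: "{{ ansible_mounts | json_query('[?device==`\"/dev/sda1\"`]') }}"
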
--- ./tests/verify-pool-md.yml 2020-09-03 18:14:46.370832516 -0600
+++ ./tests/verify-pool-md.yml.lsrout 2020-09-09 17:48:40.336753255 -0600
@@ -9,39 +9,46 @@
changed_when: false
# pre-chew regex search patterns
- set_fact:
- storage_test_md_active_devices_re: "{{('Active Devices : ' ~ storage_test_pool.raid_device_count ~ '\n')|regex_escape()}}"
+ storage_test_md_active_devices_re: "{{('Active Devices : ' ~ storage_test_pool.raid_device_count\
+ \ ~ '\n')|regex_escape()}}"
when: storage_test_pool.raid_device_count is defined
- set_fact:
- storage_test_md_spare_devices_re: "{{('Spare Devices : ' ~ storage_test_pool.raid_spare_count ~ '\n')|regex_escape()}}"
+ storage_test_md_spare_devices_re: "{{('Spare Devices : ' ~ storage_test_pool.raid_spare_count\
+ \ ~ '\n')|regex_escape()}}"
when: storage_test_pool.raid_spare_count is defined
- set_fact:
- storage_test_md_metadata_version_re: "{{('Version : ' ~ storage_test_pool.raid_metadata_version ~ '\n')|regex_escape()}}"
+ storage_test_md_metadata_version_re: "{{('Version : ' ~ storage_test_pool.raid_metadata_version\
+ \ ~ '\n')|regex_escape()}}"
when: storage_test_pool.raid_metadata_version is defined
- name: check RAID active devices count
assert:
that: "storage_test_mdadm.stdout is regex(storage_test_md_active_devices_re)"
msg: "Expected {{ storage_test_pool.raid_device_count }} active RAID devices."
- when: storage_test_pool.raid_device_count is defined and storage_test_pool.raid_device_count is not none
+ when: storage_test_pool.raid_device_count is defined and storage_test_pool.raid_device_count
+ is not none
- name: check RAID spare devices count
assert:
that: "storage_test_mdadm.stdout is regex(storage_test_md_spare_devices_re)"
msg: "Expected {{ storage_test_pool.raid_spare_count }} spare RAID devices."
- when: storage_test_pool.raid_spare_count is defined and storage_test_pool.raid_spare_count is not none
+ when: storage_test_pool.raid_spare_count is defined and storage_test_pool.raid_spare_count
+ is not none
- name: check RAID metadata version
assert:
that: "storage_test_mdadm.stdout is regex(storage_test_md_metadata_version_re)"
- msg: "Expected {{ storage_test_pool.raid_metadata_version }} RAID metadata version."
- when: storage_test_pool.raid_metadata_version is defined and storage_test_pool.raid_metadata_version is not none
+ msg: "Expected {{ storage_test_pool.raid_metadata_version }} RAID metadata\
+ \ version."
+ when: storage_test_pool.raid_metadata_version is defined and storage_test_pool.raid_metadata_version
+ is not none
when: storage_test_pool.raid_level != none and storage_test_pool.state != "absent"
- set_fact:
- storage_test_md_active_devices_re: null
- storage_test_md_spare_devices_re: null
- storage_test_md_metadata_version_re: null
+ storage_test_md_active_devices_re:
+ storage_test_md_spare_devices_re:
+ storage_test_md_metadata_version_re:
--- ./tests/tests_lvm_multiple_disks_multiple_volumes.yml 2020-09-03 18:14:46.369832510 -0600
+++ ./tests/tests_lvm_multiple_disks_multiple_volumes.yml.lsrout 2020-09-09 17:48:37.476736049 -0600
@@ -16,9 +16,10 @@
min_size: "{{ volume_group_size }}"
max_return: 2
disks_needed: 2
- - name: Create a logical volume spanning two physical volumes that changes its mount location
+ - name: Create a logical volume spanning two physical volumes that changes its
+ mount location
include_role:
name: linux-system-roles.storage
vars:
storage_pools:
--- ./tests/run_blivet.yml 2020-09-03 18:14:46.368832504 -0600
+++ ./tests/run_blivet.yml.lsrout 2020-09-09 17:48:37.492736145 -0600
@@ -1,7 +1,7 @@
---
- name: test lvm and xfs package deps
- blivet:
+ fedora.system_roles.blivet:
packages_only: "{{ packages_only }}"
pools: "{{ storage_pools|default([]) }}"
volumes: "{{ storage_volumes|default([]) }}"
register: blivet_output
--- ./tests/tests_resize.yml 2020-09-03 18:14:46.370832516 -0600
+++ ./tests/tests_resize.yml.lsrout 2020-09-09 17:48:41.429759831 -0600
@@ -8,12 +8,12 @@
volume_size_after: '9g'
invalid_size1: 'xyz GiB'
invalid_size2: 'none'
unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) *
- unused_disk_subfact.sectorsize|int }}'
- disk_size: '{{ unused_disk_subfact.sectors|int *
- unused_disk_subfact.sectorsize|int }}'
+ too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) * unused_disk_subfact.sectorsize|int
+ }}'
+ disk_size: '{{ unused_disk_subfact.sectors|int * unused_disk_subfact.sectorsize|int
+ }}'
tasks:
- include_role:
name: linux-system-roles.storage
@@ -24,38 +24,39 @@
max_return: 1
# For ext4 FS
- - name: Create one LVM logical volume with "{{ volume_size_before }}" under one volume group
+ - name: Create one LVM logical volume with "{{ volume_size_before }}" under
+ one volume group
include_role:
name: linux-system-roles.storage
vars:
- storage_pools:
- - name: foo
- disks: "{{ unused_disks }}"
- type: lvm
- volumes:
- - name: test1
+ storage_pools:
+ - name: foo
+ disks: "{{ unused_disks }}"
+ type: lvm
+ volumes:
+ - name: test1
# resizing is currently supported only for ext2/3/4
- fs_type: 'ext4'
- size: "{{ volume_size_before }}"
- mount_point: "{{ mount_location }}"
+ fs_type: 'ext4'
+ size: "{{ volume_size_before }}"
+ mount_point: "{{ mount_location }}"
- include_tasks: verify-role-results.yml
- name: Change volume_size to "{{ volume_size_after }}"
include_role:
name: linux-system-roles.storage
vars:
- storage_pools:
- - name: foo
- type: lvm
- disks: "{{ unused_disks }}"
- volumes:
- - name: test1
- fs_type: 'ext4'
- size: "{{ volume_size_after }}"
- mount_point: "{{ mount_location }}"
+ storage_pools:
+ - name: foo
+ type: lvm
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ fs_type: 'ext4'
+ size: "{{ volume_size_after }}"
+ mount_point: "{{ mount_location }}"
- include_tasks: verify-role-results.yml
- name: Change volume size to "{{ volume_size_before }}"
@@ -75,9 +76,10 @@
- include_tasks: verify-role-results.yml
- name: Test for correct handling of too-large volume size
block:
- - name: Try to create LVM with a too-large volume size, resize to "{{ too_large_size }}"
+ - name: Try to create LVM with a too-large volume size, resize to "{{ too_large_size
+ }}"
include_role:
name: linux-system-roles.storage
vars:
storage_pools:
@@ -101,16 +103,16 @@
msg: "Role has not failed when it should have"
- name: Verify the output
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('volume.+cannot be resized to.+') and
- not blivet_output.changed"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('volume.+cannot\
+ \ be resized to.+') and not blivet_output.changed"
msg: "Unexpected behavior w/ invalid volume size"
- name: Test for correct handling of volume size equal disk's size
block:
- - name: Try to create LVM with volume size equal disk's size, resize to "{{ disk_size }}"
+ - name: Try to create LVM with volume size equal disk's size, resize to
+ "{{ disk_size }}"
include_role:
name: linux-system-roles.storage
vars:
storage_pools:
@@ -134,16 +136,16 @@
msg: "Role has not failed when it should have"
- name: Verify the output
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('volume.+cannot be resized to.+') and
- not blivet_output.changed"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('volume.+cannot\
+ \ be resized to.+') and not blivet_output.changed"
msg: "Unexpected behavior w/ invalid volume size"
- name: Test for correct handling of invalid size specification
block:
- - name: Try to create LVM with an invalid size specification, resize to "{{ invalid_size1 }}"
+ - name: Try to create LVM with an invalid size specification, resize to
+ "{{ invalid_size1 }}"
include_role:
name: linux-system-roles.storage
vars:
storage_pools:
@@ -167,16 +169,16 @@
msg: "Role has not failed when it should have"
- name: Verify the output
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('invalid size.+for volume') and
- not blivet_output.changed"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('invalid\
+ \ size.+for volume') and not blivet_output.changed"
msg: "Unexpected behavior w/ invalid volume size"
- name: Test for correct handling of invalid size specification
block:
- - name: Try to create LVM with an invalid size specification, resize to "{{ invalid_size2 }}"
+ - name: Try to create LVM with an invalid size specification, resize to
+ "{{ invalid_size2 }}"
include_role:
name: linux-system-roles.storage
vars:
storage_pools:
@@ -200,31 +202,31 @@
msg: "Role has not failed when it should have"
- name: Verify the output
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('invalid size.+for volume') and
- not blivet_output.changed"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('invalid\
+ \ size.+for volume') and not blivet_output.changed"
msg: "Unexpected behavior w/ invalid volume size"
- name: Clean up
include_role:
name: linux-system-roles.storage
vars:
- storage_pools:
- - name: foo
- disks: "{{ unused_disks }}"
- state: absent
- volumes:
- - name: test1
- size: "{{ volume_size_before }}"
- mount_point: "{{ mount_location }}"
+ storage_pools:
+ - name: foo
+ disks: "{{ unused_disks }}"
+ state: absent
+ volumes:
+ - name: test1
+ size: "{{ volume_size_before }}"
+ mount_point: "{{ mount_location }}"
- include_tasks: verify-role-results.yml
# For ext3 FS
- - name: Create a LVM logical volume with "{{ volume_size_before }}" for ext3 FS
+ - name: Create a LVM logical volume with "{{ volume_size_before }}" for ext3
+ FS
include_role:
name: linux-system-roles.storage
vars:
storage_pools:
@@ -274,22 +276,23 @@
- name: Clean up
include_role:
name: linux-system-roles.storage
vars:
- storage_pools:
- - name: foo
- disks: "{{ unused_disks }}"
- state: absent
- volumes:
- - name: test1
- size: "{{ volume_size_before }}"
- mount_point: "{{ mount_location }}"
+ storage_pools:
+ - name: foo
+ disks: "{{ unused_disks }}"
+ state: absent
+ volumes:
+ - name: test1
+ size: "{{ volume_size_before }}"
+ mount_point: "{{ mount_location }}"
- include_tasks: verify-role-results.yml
# For ext2 FS
- - name: Create a LVM logical volume with "{{ volume_size_before }}" for ext2 FS
+ - name: Create a LVM logical volume with "{{ volume_size_before }}" for ext2
+ FS
include_role:
name: linux-system-roles.storage
vars:
storage_pools:
@@ -339,14 +342,14 @@
- name: Clean up
include_role:
name: linux-system-roles.storage
vars:
- storage_pools:
- - name: foo
- disks: "{{ unused_disks }}"
- state: absent
- volumes:
- - name: test1
- size: "{{ volume_size_before }}"
- mount_point: "{{ mount_location }}"
+ storage_pools:
+ - name: foo
+ disks: "{{ unused_disks }}"
+ state: absent
+ volumes:
+ - name: test1
+ size: "{{ volume_size_before }}"
+ mount_point: "{{ mount_location }}"
- include_tasks: verify-role-results.yml
--- ./tests/tests_misc.yml 2020-09-03 18:14:46.370832516 -0600
+++ ./tests/tests_misc.yml.lsrout 2020-09-09 17:48:38.353741325 -0600
@@ -6,10 +6,10 @@
mount_location: '/opt/test1'
volume_group_size: '5g'
volume1_size: '4g'
unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) *
- unused_disk_subfact.sectorsize|int }}'
+ too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) * unused_disk_subfact.sectorsize|int
+ }}'
tasks:
- include_role:
name: linux-system-roles.storage
@@ -75,14 +75,16 @@
that:
- ansible_failed_result.msg != 'UNREACH'
msg: "Role has not failed when it should have"
- - name: Verify the output when creating ext4 filesystem with invalid parameter "-Fb 512"
+ - name: Verify the output when creating ext4 filesystem with invalid parameter
+ "-Fb 512"
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('Failed to commit changes to disk.*FSError.*format failed: 1.*/dev/mapper/foo-test1') and
- not blivet_output.changed"
- msg: "Unexpected behavior when creating ext4 filesystem whith invalid parameter"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('Failed\
+ \ to commit changes to disk.*FSError.*format failed: 1.*/dev/mapper/foo-test1')\
+ \ and not blivet_output.changed"
+ msg: "Unexpected behavior when creating ext4 filesystem whith invalid\
+ \ parameter"
- name: Remove the volume group created above
include_role:
name: linux-system-roles.storage
@@ -92,9 +94,10 @@
type: lvm
disks: "{{ unused_disks }}"
state: absent
- - name: Create one LVM logical volume with "{{ volume1_size }}" under one volume group
+ - name: Create one LVM logical volume with "{{ volume1_size }}" under one volume
+ group
include_role:
name: linux-system-roles.storage
vars:
storage_pools:
@@ -137,11 +140,10 @@
msg: "Role has not failed when it should have"
- name: Verify the output when resizing with large size
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('volume.*test1.*cannot be resized to.*') and
- not blivet_output.changed"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('volume.*test1.*cannot\
+ \ be resized to.*') and not blivet_output.changed"
msg: "Unexpected behavior when resizing with large size"
- name: Remove the volume group created above
include_role:
@@ -196,9 +198,9 @@
- name: Test for correct handling of mounting a non-mountable formatiing type
block:
- name: Try to mount swap filesystem to "{{ mount_location }}"
include_role:
- name: linux-system-roles.storage
+ name: linux-system-roles.storage
vars:
storage_volumes:
- name: test1
type: disk
@@ -216,10 +218,10 @@
that:
- ansible_failed_result.msg != 'UNREACH'
msg: "Role has not failed when it should have"
- - name: Verify the output when mount swap filesystem to "{{ mount_location }}"
+ - name: Verify the output when mount swap filesystem to "{{ mount_location
+ }}"
assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('volume.*test1.*has a mount point but no mountable file system') and
- not blivet_output.changed"
+ that: "blivet_output.failed and blivet_output.msg|regex_search('volume.*test1.*has\
+ \ a mount point but no mountable file system') and not blivet_output.changed"
msg: "Unexpected behavior when mount swap filesystem"
--- ./tests/tests_include_vars_from_parent.yml 2020-09-03 18:14:46.369832510 -0600
+++ ./tests/tests_include_vars_from_parent.yml.lsrout 2020-09-09 17:48:40.053751553 -0600
@@ -21,20 +21,20 @@
facts: "{{ ansible_facts }}"
versions:
- "{{ facts['distribution_version'] }}"
- "{{ facts['distribution_major_version'] }}"
- separators: [ "-", "_" ]
+ separators: ["-", "_"]
# create all variants like CentOS, CentOS_8.1, CentOS-8.1,
# CentOS-8, CentOS-8.1
# more formally:
# {{ ansible_distribution }}-{{ ansible_distribution_version }}
# {{ ansible_distribution }}-{{ ansible_distribution_major_version }}
# {{ ansible_distribution }}
# {{ ansible_os_family }}
# and the same for _ as separator.
- varfiles: "{{ [facts['distribution']] | product(separators) |
- map('join') | product(versions) | map('join') | list +
- [facts['distribution'], facts['os_family']] }}"
+ varfiles: "{{ [facts['distribution']] | product(separators) | map('join')\
+ \ | product(versions) | map('join') | list + [facts['distribution'], facts['os_family']]\
+ \ }}"
- import_role:
name: caller
vars:
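
Note on the varfiles expression just rewrapped above, a worked expansion with illustrative facts (distribution=CentOS, distribution_version=8.1, distribution_major_version=8, os_family=RedHat):

# [CentOS] | product(["-", "_"]) | map('join')   ->  [CentOS-, CentOS_]
# ... | product([8.1, 8]) | map('join') | list   ->  [CentOS-8.1, CentOS-8, CentOS_8.1, CentOS_8]
# ... + [CentOS, RedHat]                         ->  full lookup order:
varfiles_example:
  - CentOS-8.1
  - CentOS-8
  - CentOS_8.1
  - CentOS_8
  - CentOS
  - RedHat
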
--- ./tests/test-verify-volume-device.yml 2020-09-03 18:14:46.368832504 -0600 | |
+++ ./tests/test-verify-volume-device.yml.lsrout 2020-09-09 17:48:41.919762779 -0600 | |
@@ -8,13 +8,13 @@ | |
register: storage_test_dev | |
- name: Verify the presence/absence of the device node | |
assert: | |
- that: "{{ storage_test_dev.stat.exists and storage_test_dev.stat.isblk | |
- if _storage_test_volume_present or storage_test_volume.type == 'disk' | |
- else | |
- not storage_test_dev.stat.exists }}" | |
- msg: "Incorrect device node presence for volume {{ storage_test_volume.name }}" | |
+ that: "{{ storage_test_dev.stat.exists and storage_test_dev.stat.isblk if _storage_test_volume_present\ | |
+ \ or storage_test_volume.type == 'disk' else not storage_test_dev.stat.exists\ | |
+ \ }}" | |
+ msg: "Incorrect device node presence for volume {{ storage_test_volume.name\ | |
+ \ }}" | |
- name: Make sure we got info about this volume | |
assert: | |
that: "{{ storage_test_volume._raw_device in storage_test_blkinfo.info }}" | |
@@ -22,18 +22,19 @@ | |
when: _storage_test_volume_present | |
- name: (1/2) Process volume type (set initial value) | |
set_fact: | |
- st_volume_type: "{{ storage_test_volume.type }}" | |
+ st_volume_type: "{{ storage_test_volume.type }}" | |
- name: (2/2) Process volume type (get RAID value) | |
set_fact: | |
- st_volume_type: "{{ storage_test_volume.raid_level }}" | |
+ st_volume_type: "{{ storage_test_volume.raid_level }}" | |
when: storage_test_volume.type == "raid" | |
- name: Verify the volume's device type | |
assert: | |
- that: "{{ storage_test_blkinfo.info[storage_test_volume._raw_device].type == st_volume_type }}" | |
+ that: "{{ storage_test_blkinfo.info[storage_test_volume._raw_device].type ==\ | |
+ \ st_volume_type }}" | |
when: _storage_test_volume_present | |
# disks | |
# partition: ?? | |
--- ./tests/verify-pool-member-encryption.yml 2020-09-03 18:14:46.370832516 -0600 | |
+++ ./tests/verify-pool-member-encryption.yml.lsrout 2020-09-09 17:48:42.785767989 -0600 | |
@@ -1,6 +1,7 @@ | |
- name: Get the backing device path | |
- command: realpath /dev/disk/by-uuid/{{ _storage_test_pool_member_path|regex_replace('^.*/luks-', '') }} | |
+ command: realpath /dev/disk/by-uuid/{{ _storage_test_pool_member_path|regex_replace('^.*/luks-', | |
+ '') }} | |
register: _storage_test_member_backing_dev | |
when: storage_test_pool.encryption and storage_test_pool.state == 'present' | |
- name: Collect LUKS info for this member | |
@@ -10,19 +11,25 @@ | |
changed_when: false | |
- name: Check LUKS version | |
assert: | |
- that: "{{ _storage_test_luks_dump.stdout|regex_search('^\\s+Version: ' + storage_test_pool.encryption_luks_version + '$') }}" | |
+ that: "{{ _storage_test_luks_dump.stdout|regex_search('^\\s+Version: ' + storage_test_pool.encryption_luks_version\ | |
+ \ + '$') }}" | |
msg: "Wrong LUKS version for pool {{ storage_test_pool.name }}" | |
- when: storage_test_pool.state == 'present' and storage_test_pool.encryption and storage_test_pool.encryption_luks_version | |
+ when: storage_test_pool.state == 'present' and storage_test_pool.encryption and | |
+ storage_test_pool.encryption_luks_version | |
- name: Check LUKS key size | |
assert: | |
- that: "{{ _storage_test_luks_dump.stdout|regex_search('^\\s+Key: ' + storage_test_pool.encryption_key_size|string + ' bits$') }}" | |
+ that: "{{ _storage_test_luks_dump.stdout|regex_search('^\\s+Key: ' + storage_test_pool.encryption_key_size|string\ | |
+ \ + ' bits$') }}" | |
msg: "Wrong key size for pool {{ storage_test_pool.name }}" | |
- when: storage_test_pool.state == 'present' and storage_test_pool.encryption and storage_test_pool.encryption_key_size | |
+ when: storage_test_pool.state == 'present' and storage_test_pool.encryption and | |
+ storage_test_pool.encryption_key_size | |
- name: Check LUKS cipher | |
assert: | |
- that: "{{ _storage_test_luks_dump.stdout|regex_search('^\\s+Cipher: ' + storage_test_pool.encryption_cipher + '$') }}" | |
+ that: "{{ _storage_test_luks_dump.stdout|regex_search('^\\s+Cipher: ' + storage_test_pool.encryption_cipher\ | |
+ \ + '$') }}" | |
msg: "Wrong key size for pool {{ storage_test_pool.name }}" | |
- when: storage_test_pool.state == 'present' and storage_test_pool.encryption and storage_test_pool.encryption_cipher | |
+ when: storage_test_pool.state == 'present' and storage_test_pool.encryption and | |
+ storage_test_pool.encryption_cipher | |
--- ./tests/verify-pool-members-encryption.yml 2020-09-03 18:14:46.370832516 -0600 | |
+++ ./tests/verify-pool-members-encryption.yml.lsrout 2020-09-09 17:48:36.120727891 -0600 | |
@@ -1,10 +1,12 @@ | |
# | |
# /etc/crypttab | |
# | |
- set_fact: | |
- _storage_test_expected_crypttab_entries: "{{ (storage_test_pool.encryption and storage_test_pool.state == 'present')|ternary(1, 0) }}" | |
- _storage_test_expected_crypttab_key_file: "{{ storage_test_pool.encryption_key or '-' }}" | |
+ _storage_test_expected_crypttab_entries: "{{ (storage_test_pool.encryption and\ | |
+ \ storage_test_pool.state == 'present')|ternary(1, 0) }}" | |
+ _storage_test_expected_crypttab_key_file: "{{ storage_test_pool.encryption_key\ | |
+ \ or '-' }}" | |
- name: Validate pool member LUKS settings | |
include_tasks: verify-pool-member-encryption.yml | |
loop: "{{ _storage_test_pool_pvs }}" | |
@@ -19,6 +21,6 @@ | |
loop_var: _storage_test_pool_member_path | |
when: storage_test_pool.type == 'lvm' | |
- set_fact: | |
- _storage_test_crypttab_entries: null | |
- _storage_test_crypttab_key_file: null | |
+ _storage_test_crypttab_entries: | |
+ _storage_test_crypttab_key_file: | |
--- ./tests/test-verify-pool-members.yml 2020-09-03 18:14:46.368832504 -0600 | |
+++ ./tests/test-verify-pool-members.yml.lsrout 2020-09-09 17:48:42.135764078 -0600 | |
@@ -1,47 +1,56 @@ | |
- set_fact: | |
- _storage_test_pool_pvs_lvm: "{{ ansible_lvm.pvs|dict2items(key_name='path', value_name='info')|json_query('[?info.vg==`\"{}\"`].path'.format(storage_test_pool.name))|list }}" | |
+ _storage_test_pool_pvs_lvm: "{{ ansible_lvm.pvs|dict2items(key_name='path',\ | |
+ \ value_name='info')|json_query('[?info.vg==`\"{}\"`].path'.format(storage_test_pool.name))|list\ | |
+ \ }}" | |
_storage_test_pool_pvs: [] | |
- _storage_test_expected_pv_count: "{{ 0 if storage_test_pool.state == 'absent' else (storage_test_pool.raid_level | ternary(1, storage_test_pool.disks|length)) }}" | |
+ _storage_test_expected_pv_count: "{{ 0 if storage_test_pool.state == 'absent'\ | |
+ \ else (storage_test_pool.raid_level | ternary(1, storage_test_pool.disks|length))\ | |
+ \ }}" | |
when: storage_test_pool.type == 'lvm' | |
- name: Get the canonical device path for each member device | |
- resolve_blockdev: | |
+ fedora.system_roles.resolve_blockdev: | |
spec: "{{ pv }}" | |
loop: "{{ _storage_test_pool_pvs_lvm }}" | |
loop_control: | |
loop_var: pv | |
register: pv_paths | |
when: storage_test_pool.type == 'lvm' | |
- set_fact: | |
- _storage_test_pool_pvs: "{{ _storage_test_pool_pvs }} + [ '{{ pv_paths.results[idx].device }}' ]" | |
+ _storage_test_pool_pvs: "{{ _storage_test_pool_pvs }} + [ '{{ pv_paths.results[idx].device\ | |
+ \ }}' ]" | |
loop: "{{ _storage_test_pool_pvs_lvm }}" | |
loop_control: | |
index_var: idx | |
when: storage_test_pool.type == 'lvm' | |
- name: Verify PV count | |
assert: | |
- that: "{{ ansible_lvm.pvs|dict2items|json_query('[?value.vg==`\"{}\"`]'.format(storage_test_pool.name))|length == _storage_test_expected_pv_count|int }}" | |
+ that: "{{ ansible_lvm.pvs|dict2items|json_query('[?value.vg==`\"{}\"`]'.format(storage_test_pool.name))|length\ | |
+ \ == _storage_test_expected_pv_count|int }}" | |
msg: "Unexpected PV count for pool {{ storage_test_pool.name }}" | |
when: storage_test_pool.type == 'lvm' | |
- set_fact: | |
- _storage_test_expected_pv_type: "{{ 'crypt' if storage_test_pool.encryption else 'disk' }}" | |
+ _storage_test_expected_pv_type: "{{ 'crypt' if storage_test_pool.encryption\ | |
+ \ else 'disk' }}" | |
when: storage_test_pool.type == 'lvm' | |
- set_fact: | |
- _storage_test_expected_pv_type: "{{ 'partition' if storage_use_partitions|default(false) else 'disk' }}" | |
+ _storage_test_expected_pv_type: "{{ 'partition' if storage_use_partitions|default(false)\ | |
+ \ else 'disk' }}" | |
when: storage_test_pool.type == 'lvm' and not storage_test_pool.encryption | |
- set_fact: | |
_storage_test_expected_pv_type: "{{ storage_test_pool.raid_level }}" | |
when: storage_test_pool.type == 'lvm' and storage_test_pool.raid_level | |
- name: Check the type of each PV | |
assert: | |
- that: "{{ storage_test_blkinfo.info[pv]['type'] == _storage_test_expected_pv_type }}" | |
+ that: "{{ storage_test_blkinfo.info[pv]['type'] == _storage_test_expected_pv_type\ | |
+ \ }}" | |
msg: "Incorrect type for PV {{ pv }} in pool {{ storage_test_pool.name }}" | |
loop: "{{ _storage_test_pool_pvs }}" | |
loop_control: | |
loop_var: pv | |
@@ -53,8 +62,8 @@ | |
- name: Check member encryption | |
include_tasks: verify-pool-members-encryption.yml | |
- set_fact: | |
- _storage_test_expected_pv_type: null | |
- _storage_test_expected_pv_count: null | |
+ _storage_test_expected_pv_type: | |
+ _storage_test_expected_pv_count: | |
_storage_test_pool_pvs_lvm: [] | |
_storage_test_pool_pvs: [] | |
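Throughout these hunks the reformatter folds long double-quoted scalars with a trailing backslash: the backslash escapes the line break (the break and the next line's indentation are dropped), and the leading "\ " on the continuation line is an escaped space, so the loaded string is byte-for-byte the original one-liner. A plain-YAML illustration with made-up values:

    one_line: "alpha beta gamma"
    folded: "alpha beta\
      \ gamma"
    # one_line and folded load as the identical string "alpha beta gamma"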
--- ./tests/test-verify-volume-md.yml 2020-09-03 18:14:46.369832510 -0600 | |
+++ ./tests/test-verify-volume-md.yml.lsrout 2020-09-09 17:48:36.897732566 -0600 | |
@@ -10,17 +10,20 @@ | |
changed_when: false | |
# pre-chew regex search patterns | |
- set_fact: | |
- storage_test_md_active_devices_re: "{{('Active Devices : ' ~ storage_test_volume.raid_device_count ~ '\n')|regex_escape()}}" | |
+ storage_test_md_active_devices_re: "{{('Active Devices : ' ~ storage_test_volume.raid_device_count\ | |
+ \ ~ '\n')|regex_escape()}}" | |
when: storage_test_volume.raid_device_count is defined | |
- set_fact: | |
- storage_test_md_spare_devices_re: "{{('Spare Devices : ' ~ storage_test_volume.raid_spare_count ~ '\n')|regex_escape()}}" | |
+ storage_test_md_spare_devices_re: "{{('Spare Devices : ' ~ storage_test_volume.raid_spare_count\ | |
+ \ ~ '\n')|regex_escape()}}" | |
when: storage_test_volume.raid_spare_count is defined | |
- set_fact: | |
- storage_test_md_metadata_version_re: "{{('Version : ' ~ storage_test_volume.raid_metadata_version ~ '\n')|regex_escape()}}" | |
+ storage_test_md_metadata_version_re: "{{('Version : ' ~ storage_test_volume.raid_metadata_version\ | |
+ \ ~ '\n')|regex_escape()}}" | |
when: storage_test_volume.raid_metadata_version is defined | |
- name: check RAID active devices count | |
assert: | |
@@ -36,8 +39,9 @@ | |
- name: check RAID metadata version | |
assert: | |
that: "storage_test_mdadm.stdout is regex(storage_test_md_metadata_version_re)" | |
- msg: "Expected {{ storage_test_volume.raid_metadata_version }} RAID metadata version." | |
+ msg: "Expected {{ storage_test_volume.raid_metadata_version }} RAID metadata\ | |
+ \ version." | |
when: storage_test_volume.raid_metadata_version is not none | |
when: storage_test_volume.type == 'raid' and storage_test_volume._device != "" | |
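The "pre-chew" steps above build literal-match patterns up front: the expected count is joined to a newline with ~ and the result passed through regex_escape, so the later asserts can match mdadm's output verbatim via the regex test. A stripped-down sketch with a hypothetical count of 2:

    - set_fact:
        storage_test_re: "{{ ('Active Devices : ' ~ 2 ~ '\n') | regex_escape() }}"  # escape so the count matches literally
    - assert:
        that: "'Active Devices : 2\n' is regex(storage_test_re)"
        msg: "expected 2 active RAID devices"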
--- ./tests/tests_create_partition_volume_then_remove.yml 2020-09-03 18:14:46.369832510 -0600 | |
+++ ./tests/tests_create_partition_volume_then_remove.yml.lsrout 2020-09-09 17:48:42.282764963 -0600 | |
@@ -47,9 +47,9 @@ | |
include_role: | |
name: linux-system-roles.storage | |
vars: | |
storage_pools: | |
- - name: "{{ unused_disks[0] }}" | |
+ - name: "{{ unused_disks[0] }}" | |
type: partition | |
disks: "{{ unused_disks }}" | |
state: absent | |
volumes: | |
@@ -64,9 +64,9 @@ | |
include_role: | |
name: linux-system-roles.storage | |
vars: | |
storage_pools: | |
- - name: "{{ unused_disks[0] }}" | |
+ - name: "{{ unused_disks[0] }}" | |
type: partition | |
disks: "{{ unused_disks }}" | |
state: absent | |
volumes: | |
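The only change in this file is indentation: the sequence items under storage_pools shift two spaces right. YAML accepts block-sequence dashes either flush with the parent key or indented beneath it, and both layouts load as the same data; the formatter merely standardizes on the indented form:

    flush:
    - a
    - b
    indented:
      - a
      - b
    # flush and indented both load as the same two-element list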
--- ./tests/tests_lvm_errors.yml 2020-09-03 18:14:46.369832510 -0600 | |
+++ ./tests/tests_lvm_errors.yml.lsrout 2020-09-09 17:48:36.012727242 -0600 | |
@@ -10,10 +10,10 @@ | |
invalid_disks: | |
- '/non/existent/disk' | |
invalid_size: 'xyz GiB' | |
unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}' | |
- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) * | |
- unused_disk_subfact.sectorsize|int }}' | |
+ too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) * unused_disk_subfact.sectorsize|int | |
+ }}' | |
tasks: | |
- include_role: | |
name: linux-system-roles.storage | |
@@ -251,11 +251,10 @@ | |
msg: "Role has not failed when it should have" | |
- name: Verify the output of the duplicate pools test | |
assert: | |
- that: "blivet_output.failed and | |
- 'multiple pools with the same name' in blivet_output.msg and | |
- not blivet_output.changed" | |
+ that: "blivet_output.failed and 'multiple pools with the same name'\ | |
+ \ in blivet_output.msg and not blivet_output.changed" | |
msg: "Unexpected behavior w/ multiple pools sharing one name" | |
- name: Test for correct handling of duplicate volume names within a pool | |
block: | |
@@ -289,12 +288,12 @@ | |
msg: "Role has not failed when it should have" | |
- name: Verify the output of the duplicate volumes test | |
assert: | |
- that: "blivet_output.failed and | |
- blivet_output.msg|regex_search('multiple volumes in pool.*with the same name') and | |
- not blivet_output.changed" | |
- msg: "Unexpected behavior w/ multiple volumes within a pool sharing a name" | |
+ that: "blivet_output.failed and blivet_output.msg|regex_search('multiple\ | |
+ \ volumes in pool.*with the same name') and not blivet_output.changed" | |
+ msg: "Unexpected behavior w/ multiple volumes within a pool sharing\ | |
+ \ a name" | |
- name: Create a pool | |
include_role: | |
name: linux-system-roles.storage | |
@@ -335,16 +334,17 @@ | |
msg: "Role has not failed when it should have" | |
- name: Verify the output | |
assert: | |
- that: "{{ blivet_output.failed and | |
- blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and | |
- not blivet_output.changed }}" | |
+ that: "{{ blivet_output.failed and blivet_output.msg|regex_search('cannot\ | |
+ \ remove existing formatting on volume.*in safe mode') and not blivet_output.changed\ | |
+ \ }}" | |
msg: "Unexpected behavior w/ existing data on specified disks" | |
- name: Test for correct handling of safe_mode with existing pool | |
block: | |
- - name: Try to create LVM pool on disks that already belong to an existing pool | |
+ - name: Try to create LVM pool on disks that already belong to an existing | |
+ pool | |
include_role: | |
name: linux-system-roles.storage | |
vars: | |
storage_pools: | |
@@ -364,11 +364,11 @@ | |
msg: "Role has not failed when it should have" | |
- name: Verify the output | |
assert: | |
- that: "{{ blivet_output.failed and | |
- blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and | |
- not blivet_output.changed }}" | |
+ that: "{{ blivet_output.failed and blivet_output.msg|regex_search('cannot\ | |
+ \ remove existing formatting and/or devices on disk.*in safe mode')\ | |
+ \ and not blivet_output.changed }}" | |
msg: "Unexpected behavior w/ existing data on specified disks" | |
- name: Test for correct handling of safe_mode | |
block: | |
@@ -395,11 +395,10 @@ | |
msg: "Role has not failed when it should have" | |
- name: Verify the output | |
assert: | |
- that: "blivet_output.failed and | |
- blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and | |
- not blivet_output.changed" | |
+ that: "blivet_output.failed and blivet_output.msg|regex_search('cannot remove\ | |
+ \ existing formatting on volume.*in safe mode') and not blivet_output.changed" | |
msg: "Unexpected behavior w/ existing data on specified disks" | |
- name: Clean up | |
include_role: | |
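The reflowed assertions keep their original semantics. Since assert's that: accepts a single Jinja expression or a list of them, the long quoted boolean chains could also be split into a list, which sidesteps the folding entirely; a hedged sketch with a made-up register name:

    - assert:
        that:
          - my_output.failed
          - my_output.msg is regex('multiple pools with the same name')
          - not my_output.changed
        msg: "Unexpected behavior w/ multiple pools sharing one name"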
--- ./tests/tests_change_disk_fs.yml 2020-09-03 18:14:46.369832510 -0600 | |
+++ ./tests/tests_change_disk_fs.yml.lsrout 2020-09-09 17:48:39.313747101 -0600 | |
@@ -4,9 +4,10 @@ | |
vars: | |
storage_safe_mode: false | |
mount_location: '/opt/test' | |
volume_size: '5g' | |
- fs_type_after: "{{ 'ext3' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') else 'ext4' }}" | |
+ fs_type_after: "{{ 'ext3' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version\ | |
+ \ == '6') else 'ext4' }}" | |
tasks: | |
- include_role: | |
name: linux-system-roles.storage | |
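fs_type_after uses Jinja's inline conditional; an equivalent spelling uses the ternary filter. For a hypothetical boolean variable legacy, the two forms are interchangeable:

    fs_a: "{{ 'ext3' if legacy else 'ext4' }}"
    fs_b: "{{ legacy | ternary('ext3', 'ext4') }}"
    # fs_a == fs_b for any boolean value of legacy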
--- ./tests/tests_lvm_one_disk_one_volume.yml 2020-09-03 18:14:46.369832510 -0600 | |
+++ ./tests/tests_lvm_one_disk_one_volume.yml.lsrout 2020-09-09 17:48:36.816732078 -0600 | |
@@ -18,43 +18,43 @@ | |
- name: Create one LVM logical volume under one volume group | |
include_role: | |
name: linux-system-roles.storage | |
vars: | |
- storage_pools: | |
- - name: foo | |
- disks: "{{ unused_disks }}" | |
- volumes: | |
- - name: test1 | |
- size: "{{ volume_size }}" | |
- mount_point: "{{ mount_location }}" | |
+ storage_pools: | |
+ - name: foo | |
+ disks: "{{ unused_disks }}" | |
+ volumes: | |
+ - name: test1 | |
+ size: "{{ volume_size }}" | |
+ mount_point: "{{ mount_location }}" | |
- include_tasks: verify-role-results.yml | |
- name: Repeat the previous invocation to verify idempotence | |
include_role: | |
name: linux-system-roles.storage | |
vars: | |
- storage_pools: | |
- - name: foo | |
- disks: "{{ unused_disks }}" | |
- volumes: | |
- - name: test1 | |
- size: "{{ volume_size }}" | |
- mount_point: "{{ mount_location }}" | |
+ storage_pools: | |
+ - name: foo | |
+ disks: "{{ unused_disks }}" | |
+ volumes: | |
+ - name: test1 | |
+ size: "{{ volume_size }}" | |
+ mount_point: "{{ mount_location }}" | |
- include_tasks: verify-role-results.yml | |
- name: Clean up | |
include_role: | |
name: linux-system-roles.storage | |
vars: | |
- storage_pools: | |
- - name: foo | |
- disks: "{{ unused_disks }}" | |
- state: absent | |
- volumes: | |
- - name: test1 | |
- size: "{{ volume_size }}" | |
- mount_point: "{{ mount_location }}" | |
- state: absent | |
+ storage_pools: | |
+ - name: foo | |
+ disks: "{{ unused_disks }}" | |
+ state: absent | |
+ volumes: | |
+ - name: test1 | |
+ size: "{{ volume_size }}" | |
+ mount_point: "{{ mount_location }}" | |
+ state: absent | |
- include_tasks: verify-role-results.yml | |
--- ./tests/test-verify-volume-size.yml 2020-09-03 18:14:46.369832510 -0600 | |
+++ ./tests/test-verify-volume-size.yml.lsrout 2020-09-09 17:48:38.988745146 -0600 | |
@@ -1,17 +1,19 @@ | |
--- | |
- name: parse the actual size of the volume | |
- bsize: | |
+ fedora.system_roles.bsize: | |
size: "{{ storage_test_blkinfo.info[storage_test_volume._device].size }}" | |
register: storage_test_actual_size | |
- when: _storage_test_volume_present and storage_test_volume.type not in ('partition', 'disk') | |
+ when: _storage_test_volume_present and storage_test_volume.type not in ('partition', | |
+ 'disk') | |
- name: parse the requested size of the volume | |
- bsize: | |
+ fedora.system_roles.bsize: | |
size: "{{ storage_test_volume.size }}" | |
register: storage_test_requested_size | |
- when: _storage_test_volume_present and storage_test_volume.size is defined and storage_test_volume.type not in ('partition', 'disk', 'raid') | |
+ when: _storage_test_volume_present and storage_test_volume.size is defined and | |
+ storage_test_volume.type not in ('partition', 'disk', 'raid') | |
- debug: | |
var: storage_test_actual_size | |
@@ -20,5 +22,6 @@ | |
- assert: | |
that: storage_test_actual_size == storage_test_requested_size | |
msg: "Volume {{ storage_test_volume.name }} has unexpected size" | |
- when: _storage_test_volume_present and storage_test_volume.size is defined and storage_test_volume.type not in ('partition', 'disk', 'raid') | |
+ when: _storage_test_volume_present and storage_test_volume.size is defined and | |
+ storage_test_volume.type not in ('partition', 'disk', 'raid') | |
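As with resolve_blockdev earlier and blivet in tasks/main-blivet.yml below, the bare module name becomes a fully qualified collection name here, so the role's bundled modules resolve unambiguously once shipped in the fedora.system_roles collection; task behavior is unchanged. A hypothetical standalone call, assuming that collection is installed:

    - name: parse a size string
      fedora.system_roles.bsize:
        size: "5g"   # same size syntax the tests use
      register: parsed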
--- ./tests/test-verify-volume.yml 2020-09-03 18:14:46.369832510 -0600 | |
+++ ./tests/test-verify-volume.yml.lsrout 2020-09-09 17:48:39.025745368 -0600 | |
@@ -1,12 +1,15 @@ | |
--- | |
- set_fact: | |
- _storage_volume_tests: ['mount', 'fstab', 'fs', 'device', 'encryption', 'md', 'size'] # fs: type, label device: name, type, disks | |
+ _storage_volume_tests: ['mount', 'fstab', 'fs', 'device', 'encryption', 'md', | |
+ 'size'] # fs: type, label device: name, type, disks | |
# future: | |
# device: | |
# compression | |
# deduplication | |
- _storage_test_volume_present: "{{ storage_test_volume.state == 'present' and (not storage_test_pool|default() or storage_test_pool.state == 'present') }}" | |
+ _storage_test_volume_present: "{{ storage_test_volume.state == 'present' and\ | |
+ \ (not storage_test_pool|default() or storage_test_pool.state == 'present')\ | |
+ \ }}" | |
- name: | |
include_tasks: "test-verify-volume-{{ storage_test_volume_subset }}.yml" | |
loop: "{{ _storage_volume_tests }}" | |
@@ -14,5 +17,5 @@ | |
loop_var: storage_test_volume_subset | |
- name: Clean up facts | |
set_fact: | |
- _storage_test_volume_present: null | |
+ _storage_test_volume_present: | |
--- ./defaults/main.yml 2020-09-03 18:14:46.367832498 -0600 | |
+++ ./defaults/main.yml.lsrout 2020-09-09 17:48:42.888768609 -0600 | |
@@ -1,28 +1,28 @@ | |
--- | |
# defaults file for template | |
storage_provider: "blivet" | |
-storage_use_partitions: null | |
-storage_disklabel_type: null # leave unset to allow the role to select an appropriate label type | |
+storage_use_partitions: | |
+storage_disklabel_type: # leave unset to allow the role to select an appropriate label type | |
storage_safe_mode: true # fail instead of implicitly/automatically removing devices or formatting | |
storage_pool_defaults: | |
state: "present" | |
type: lvm | |
volumes: [] | |
encryption: false | |
- encryption_password: null | |
- encryption_key: null | |
- encryption_cipher: null | |
- encryption_key_size: null | |
- encryption_luks_version: null | |
- | |
- raid_level: null | |
- raid_device_count: null | |
- raid_spare_count: null | |
- raid_chunk_size: null | |
- raid_metadata_version: null | |
+ encryption_password: | |
+ encryption_key: | |
+ encryption_cipher: | |
+ encryption_key_size: | |
+ encryption_luks_version: | |
+ | |
+ raid_level: | |
+ raid_device_count: | |
+ raid_spare_count: | |
+ raid_chunk_size: | |
+ raid_metadata_version: | |
storage_volume_defaults: | |
state: "present" | |
type: lvm | |
@@ -38,16 +38,16 @@ | |
mount_check: 0 | |
mount_passno: 0 | |
mount_device_identifier: "uuid" # uuid|label|path | |
- raid_level: null | |
- raid_device_count: null | |
- raid_spare_count: null | |
- raid_chunk_size: null | |
- raid_metadata_version: null | |
+ raid_level: | |
+ raid_device_count: | |
+ raid_spare_count: | |
+ raid_chunk_size: | |
+ raid_metadata_version: | |
encryption: false | |
- encryption_password: null | |
- encryption_key: null | |
- encryption_cipher: null | |
- encryption_key_size: null | |
- encryption_luks_version: null | |
+ encryption_password: | |
+ encryption_key: | |
+ encryption_cipher: | |
+ encryption_key_size: | |
+ encryption_luks_version: | |
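These defaults are merged beneath every user-supplied pool and volume (see the combine() calls in tasks/main-blivet.yml below), so a playbook only states what differs from them. A minimal invocation, with illustrative names and unused_disks assumed to come from the tests' disk discovery:

    - hosts: all
      tasks:
        - include_role:
            name: linux-system-roles.storage
          vars:
            storage_pools:
              - name: foo
                disks: "{{ unused_disks }}"
                volumes:
                  - name: test1
                    size: "5g"
                    mount_point: "/opt/test1"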
--- ./tasks/main.yml 2020-09-03 18:14:46.368832504 -0600 | |
+++ ./tasks/main.yml.lsrout 2020-09-09 17:48:43.273770925 -0600 | |
@@ -3,10 +3,10 @@ | |
include_vars: "{{ lookup('first_found', ffparams) }}" | |
vars: | |
ffparams: | |
files: | |
- - "{{ ansible_facts['distribution'] }}_\ | |
- {{ ansible_facts['distribution_major_version'] }}.yml" | |
+ - "{{ ansible_facts['distribution'] }}_{{ ansible_facts['distribution_major_version']\ | |
+ \ }}.yml" | |
- "{{ ansible_facts['distribution'] }}.yml" | |
- "{{ ansible_facts['os_family'] }}.yml" | |
paths: | |
- "{{ role_path }}/vars" | |
--- ./tasks/main-blivet.yml 2020-09-03 18:14:46.368832504 -0600 | |
+++ ./tasks/main-blivet.yml.lsrout 2020-09-09 17:48:43.219770600 -0600 | |
@@ -16,35 +16,40 @@ | |
## perhaps because I am new to ansible/yaml/jinja. | |
## | |
- name: initialize internal facts | |
set_fact: | |
- _storage_pools: [] # list of pools w/ defaults applied as necessary | |
- _storage_volumes: [] # list of standalone volumes w/ defaults applied as necessary | |
- _storage_vol_defaults: [] # list w/ volume default dict for each pool volume | |
- _storage_vols_no_defaults: [] # combined list of pool volumes w/o defaults applied | |
- _storage_vols_w_defaults: [] # combined list of volumes w/ defaults applied | |
- _storage_vol_pools: [] # combined list of pool name for each volume | |
- _storage_vols_no_defaults_by_pool: {} # dict w/ pool name keys and pool volume list values | |
+ _storage_pools: [] # list of pools w/ defaults applied as necessary | |
+ _storage_volumes: [] # list of standalone volumes w/ defaults applied as necessary | |
+ _storage_vol_defaults: [] # list w/ volume default dict for each pool volume | |
+ _storage_vols_no_defaults: [] # combined list of pool volumes w/o defaults applied | |
+ _storage_vols_w_defaults: [] # combined list of volumes w/ defaults applied | |
+ _storage_vol_pools: [] # combined list of pool name for each volume | |
+ _storage_vols_no_defaults_by_pool: {} # dict w/ pool name keys and pool volume list values | |
- name: Apply defaults to pools and volumes [1/6] | |
set_fact: | |
- _storage_pools: "{{ _storage_pools|default([]) }} + [ {{ storage_pool_defaults|combine(pool) }} ]" | |
+ _storage_pools: "{{ _storage_pools|default([]) }} + [ {{ storage_pool_defaults|combine(pool)\ | |
+ \ }} ]" | |
loop: "{{ storage_pools|default([]) }}" | |
loop_control: | |
loop_var: pool | |
when: storage_pools is defined | |
- name: Apply defaults to pools and volumes [2/6] | |
set_fact: | |
- _storage_vols_no_defaults: "{{ _storage_vols_no_defaults|default([]) }} + [{{ item.1 }}]" | |
- _storage_vol_defaults: "{{ _storage_vol_defaults|default([]) }} + [{{ storage_volume_defaults }}]" | |
- _storage_vol_pools: "{{ _storage_vol_pools|default([]) }} + ['{{ item.0.name }}']" | |
+ _storage_vols_no_defaults: "{{ _storage_vols_no_defaults|default([]) }} + [{{\ | |
+ \ item.1 }}]" | |
+ _storage_vol_defaults: "{{ _storage_vol_defaults|default([]) }} + [{{ storage_volume_defaults\ | |
+ \ }}]" | |
+ _storage_vol_pools: "{{ _storage_vol_pools|default([]) }} + ['{{ item.0.name\ | |
+ \ }}']" | |
loop: "{{ _storage_pools|subelements('volumes', skip_missing=true) }}" | |
when: storage_pools is defined | |
- name: Apply defaults to pools and volumes [3/6] | |
set_fact: | |
- _storage_vols_w_defaults: "{{ _storage_vols_w_defaults|default([]) }} + [ {{ item.1|combine(item.0, {'pool': _storage_vol_pools[idx]}) }} ]" | |
+ _storage_vols_w_defaults: "{{ _storage_vols_w_defaults|default([]) }} + [ {{\ | |
+ \ item.1|combine(item.0, {'pool': _storage_vol_pools[idx]}) }} ]" | |
loop: "{{ _storage_vols_no_defaults|zip(_storage_vol_defaults)|list }}" | |
loop_control: | |
index_var: idx | |
when: storage_pools is defined | |
@@ -52,27 +57,29 @@ | |
- name: Apply defaults to pools and volumes [4/6] | |
set_fact: | |
# json_query(...) used instead of "|selectattr('pool', 'equalto', item.name)|list" | |
# as that expression wouldn't work with Jinja versions <2.8 | |
- _storage_vols_no_defaults_by_pool: "{{ _storage_vols_no_defaults_by_pool|default({})| | |
- combine({item.name: _storage_vols_w_defaults|json_query('[?pool==`\"{}\"`]'.format(item.name))}) }}" | |
+ _storage_vols_no_defaults_by_pool: "{{ _storage_vols_no_defaults_by_pool|default({})|\ | |
+ \ combine({item.name: _storage_vols_w_defaults|json_query('[?pool==`\"{}\"\ | |
+ `]'.format(item.name))}) }}" | |
loop: "{{ _storage_pools }}" | |
when: storage_pools is defined | |
- name: Apply defaults to pools and volumes [5/6] | |
set_fact: | |
- _storage_pools: "{{ _storage_pools[:idx] }} + | |
- [ {{ pool|combine({'volumes': _storage_vols_no_defaults_by_pool[pool.name]}) }} ] + | |
- {{ _storage_pools[idx+1:] }}" | |
+ _storage_pools: "{{ _storage_pools[:idx] }} + [ {{ pool|combine({'volumes':\ | |
+ \ _storage_vols_no_defaults_by_pool[pool.name]}) }} ] + {{ _storage_pools[idx+1:]\ | |
+ \ }}" | |
loop: "{{ _storage_pools }}" | |
loop_control: | |
loop_var: pool | |
index_var: idx | |
when: storage_pools is defined | |
- name: Apply defaults to pools and volumes [6/6] | |
set_fact: | |
- _storage_volumes: "{{ _storage_volumes|default([]) }} + [ {{ storage_volume_defaults|combine(volume) }} ]" | |
+ _storage_volumes: "{{ _storage_volumes|default([]) }} + [ {{ storage_volume_defaults|combine(volume)\ | |
+ \ }} ]" | |
loop: "{{ storage_volumes|default([]) }}" | |
loop_control: | |
loop_var: volume | |
when: storage_volumes is defined | |
@@ -87,9 +94,9 @@ | |
- debug: | |
var: _storage_volumes | |
- name: get required packages | |
- blivet: | |
+ fedora.system_roles.blivet: | |
pools: "{{ _storage_pools }}" | |
volumes: "{{ _storage_volumes }}" | |
use_partitions: "{{ storage_use_partitions }}" | |
disklabel_type: "{{ storage_disklabel_type }}" | |
@@ -101,15 +108,16 @@ | |
name: "{{ package_info.packages }}" | |
state: present | |
- name: manage the pools and volumes to match the specified state | |
- blivet: | |
+ fedora.system_roles.blivet: | |
pools: "{{ _storage_pools }}" | |
volumes: "{{ _storage_volumes }}" | |
use_partitions: "{{ storage_use_partitions }}" | |
disklabel_type: "{{ storage_disklabel_type }}" | |
safe_mode: "{{ storage_safe_mode }}" | |
- diskvolume_mkfs_option_map: "{{ __storage_blivet_diskvolume_mkfs_option_map|d(omit) }}" | |
+ diskvolume_mkfs_option_map: "{{ __storage_blivet_diskvolume_mkfs_option_map|d(omit)\ | |
+ \ }}" | |
register: blivet_output | |
- debug: | |
var: blivet_output | |
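The diskvolume_mkfs_option_map argument relies on the d(omit) idiom: d is the default filter, and the omit placeholder makes Ansible drop the parameter entirely when __storage_blivet_diskvolume_mkfs_option_map is undefined, instead of passing an empty value. A generic sketch using a built-in module and a hypothetical ping_data variable:

    - ansible.builtin.ping:
        data: "{{ ping_data | d(omit) }}"   # parameter is omitted entirely if ping_data is undefined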
--- ./meta/main.yml 2020-09-03 18:14:46.368832504 -0600 | |
+++ ./meta/main.yml.lsrout 2020-09-09 17:48:43.383771587 -0600 | |
@@ -6,7 +6,7 @@ | |
license: MIT | |
min_ansible_version: 2.5 | |
platforms: | |
- name: Fedora | |
- versions: [ 31, 32 ] | |
+ versions: [31, 32] | |
- name: EL | |
- versions: [ 7, 8 ] | |
+ versions: [7, 8] |
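This last hunk only tightens flow-sequence spacing, the default for yamllint's brackets rule; [ 7, 8 ] and [7, 8] are the same list:

    spaced: [ 7, 8 ]
    tight: [7, 8]
    # spaced and tight load as the identical list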