Last active
October 3, 2019 20:22
-
-
Save mzpqnxow/09f3a7dde06b5ed5f7ecbe3b0c51a91d to your computer and use it in GitHub Desktop.
Workaround: LVM can't detect VG by UUID and vgcfgrestore fails to construct volume group even though blkid shows the correct UUID
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| # | |
| # This is a boilerplate LVM configuration file (/etc/lvm/lvm.conf) with a few simple modifications | |
| # that will fix a bug encountered when booting a no-modules kernel with LVM inside of a LUKS device | |
| # The bug is best described as a failure of LVM to find a disk by UUID, even though the device with | |
| # that UUID is present (in this case, in the form of an unlocked LUKS volume) | |
| # | |
| ## | |
| # Symptoms/Preconditions: | |
| # - LUKS device is unlocked and the UUID for the VG can be seen in the `blkid` output | |
| # - The LVM VGs are not present in /dev/mapper/ as expected | |
| # - Using pvscan, lvscan, vgscan and vgchange -ay have no effect | |
| # - Using vgcfgrestore with the backup file defining the configuration of the VG (explicitly!) | |
| # fails, saying it can't find the UUID for the PV | |
| # - The PV UUID is present in `blkid` output, suggesting LVM is searching using some alternate | |
| # method to find the PV by UUID | |
| # | |
| ## Cause | |
| # I honestly do not know. This may be something related to udev, as I believe the LVM tools in | |
| # the default configuration rely on UUID information from udev. As this behavior was only | |
| # encountered on a custom built kernel with modules disabled, there is a strong likelihood that | |
| # this has *something* to do with the problem- but the solution doesn't seem to have anything to do | |
| # with missing kernel features, oddly enough | |
| # EDIT: Cause seems likely to be a lack of a running lvmetad .. | |
| # | |
| # Solution/Workaround: | |
| # | |
| # The important changes are in the scan and filter lines. Once the lvm.conf is changed to include the | |
| # added regex patterns, the lvm tools are able to discover the UUID correctly. Once the lvm.conf file | |
| # is updated, it is possible to use all of the standard commands to activate the VG: | |
| # | |
| # lvm pvscan | |
| # lvm lvscan | |
| # lvm vgscan --mknodes | |
| # vgchange -ay | |
| # | |
| # Probably the following will be all that you need (if you have a VG backup file) | |
| # vgcfgrestore -f /etc/lvm/backup/<my>-vg <my>-vg | |
| # vgchange -ay | |
| # | |
| config { | |
| checks = 1 | |
| abort_on_errors = 0 | |
| profile_dir = "/etc/lvm/profile" | |
| } | |
| devices { | |
| dir = "/dev" | |
| scan = [ "/dev", "/dev/mapper" ] | |
| obtain_device_list_from_udev = 1 | |
| external_device_info_source = "none" | |
| filter = [ "a|/dev/sda.*|", "a|/dev/mapper/.*|", "a|/dev/disk/by-uuid/.*|", "a|/dev/disk/by-id/dm-uuid-.*mpath-.*|", "r|.*|"] | |
| sysfs_scan = 1 | |
| scan_lvs = 1 | |
| multipath_component_detection = 1 | |
| md_component_detection = 1 | |
| fw_raid_component_detection = 0 | |
| md_chunk_alignment = 1 | |
| data_alignment_detection = 1 | |
| data_alignment = 0 | |
| data_alignment_offset_detection = 1 | |
| ignore_suspended_devices = 0 | |
| ignore_lvm_mirrors = 1 | |
| require_restorefile_with_uuid = 1 | |
| pv_min_size = 2048 | |
| issue_discards = 0 | |
| allow_changes_with_duplicate_pvs = 0 | |
| } | |
| allocation { | |
| maximise_cling = 1 | |
| use_blkid_wiping = 1 | |
| wipe_signatures_when_zeroing_new_lvs = 1 | |
| mirror_logs_require_separate_pvs = 0 | |
| cache_pool_metadata_require_separate_pvs = 0 | |
| thin_pool_metadata_require_separate_pvs = 0 | |
| } | |
| log { | |
| verbose = 0 | |
| silent = 0 | |
| syslog = 1 | |
| overwrite = 0 | |
| level = 0 | |
| indent = 1 | |
| command_names = 0 | |
| prefix = " " | |
| activation = 0 | |
| debug_classes = [ "memory", "devices", "io", "activation", "allocation", "metadata", "cache", "locking", "lvmpolld", "dbus" ] | |
| } | |
| backup { | |
| backup = 1 | |
| backup_dir = "/etc/lvm/backup" | |
| archive = 1 | |
| archive_dir = "/etc/lvm/archive" | |
| retain_min = 10 | |
| retain_days = 30 | |
| } | |
| shell { | |
| history_size = 100 | |
| } | |
| global { | |
| umask = 077 | |
| test = 0 | |
| units = "r" | |
| si_unit_consistency = 1 | |
| suffix = 1 | |
| activation = 1 | |
| proc = "/proc" | |
| etc = "/etc" | |
| wait_for_locks = 1 | |
| locking_dir = "/run/lock/lvm" | |
| prioritise_write_locks = 1 | |
| abort_on_internal_errors = 0 | |
| metadata_read_only = 0 | |
| mirror_segtype_default = "raid1" | |
| raid10_segtype_default = "raid10" | |
| sparse_segtype_default = "thin" | |
| event_activation = 1 | |
| use_lvmlockd = 0 | |
| system_id_source = "none" | |
| use_lvmpolld = 1 | |
| notify_dbus = 1 | |
| } | |
| activation { | |
| checks = 0 | |
| udev_sync = 1 | |
| udev_rules = 1 | |
| verify_udev_operations = 0 | |
| retry_deactivation = 1 | |
| missing_stripe_filler = "error" | |
| use_linear_target = 1 | |
| reserved_stack = 64 | |
| reserved_memory = 8192 | |
| process_priority = -18 | |
| raid_region_size = 2048 | |
| readahead = "auto" | |
| raid_fault_policy = "warn" | |
| mirror_image_fault_policy = "remove" | |
| mirror_log_fault_policy = "allocate" | |
| snapshot_autoextend_threshold = 100 | |
| snapshot_autoextend_percent = 20 | |
| thin_pool_autoextend_threshold = 100 | |
| thin_pool_autoextend_percent = 20 | |
| vdo_pool_autoextend_threshold = 100 | |
| use_mlockall = 0 | |
| monitoring = 1 | |
| polling_interval = 15 | |
| activation_mode = "degraded" | |
| } | |
| dmeventd { | |
| mirror_library = "libdevmapper-event-lvm2mirror.so" | |
| snapshot_library = "libdevmapper-event-lvm2snapshot.so" | |
| thin_library = "libdevmapper-event-lvm2thin.so" | |
| } | |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment