diff --git a/0002-Add-support-for-specifying-stripe-size-for-RAID-LVs.patch b/0002-Add-support-for-specifying-stripe-size-for-RAID-LVs.patch
deleted file mode 100644
index 5e407c4..0000000
--- a/0002-Add-support-for-specifying-stripe-size-for-RAID-LVs.patch
+++ /dev/null
@@ -1,170 +0,0 @@
-From ce21086cfce21faacdb29adb8e28ede24c2bfa50 Mon Sep 17 00:00:00 2001
-From: Vojtech Trefny
-Date: Thu, 9 Mar 2023 13:18:42 +0100
-Subject: [PATCH] Add support for specifying stripe size for RAID LVs
-
----
- blivet/devices/lvm.py                        | 28 +++++++++++++++++---
- tests/storage_tests/devices_test/lvm_test.py | 12 +++++++--
- tests/unit_tests/devices_test/lvm_test.py    | 27 +++++++++++++++++++
- 3 files changed, 61 insertions(+), 6 deletions(-)
-
-diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
-index 434c359f6..e97b61916 100644
---- a/blivet/devices/lvm.py
-+++ b/blivet/devices/lvm.py
-@@ -659,7 +659,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
- 
-     def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
-                  fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None,
--                 percent=None, cache_request=None, pvs=None, from_lvs=None):
-+                 percent=None, cache_request=None, pvs=None, from_lvs=None,
-+                 stripe_size=0):
- 
-         if not exists:
-             if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
-@@ -756,6 +757,15 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
-         if self._pv_specs:
-             self._assign_pv_space()
- 
-+        self._stripe_size = stripe_size
-+        if not self.exists and self._stripe_size:
-+            if self.seg_type not in lvm.raid_seg_types:
-+                raise errors.DeviceError("Stripe size can be specified only for RAID volumes")
-+            if self.seg_type in ("raid1", "RAID1", "1", 1, "mirror"):
-+                raise errors.DeviceError("Specifying stripe size is not allowed for RAID1 or mirror")
-+            if self.cache:
-+                raise errors.DeviceError("Creating cached LVs with custom stripe size is not supported")
-+
-     def _assign_pv_space(self):
-         if not self.is_raid_lv:
-             # nothing to do for non-RAID (and thus non-striped) LVs here
-@@ -2295,7 +2305,7 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
-                  parent_lv=None, int_type=None, origin=None, vorigin=False,
-                  metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
-                  compression=False, deduplication=False, index_memory=0,
--                 write_policy=None, cache_mode=None, attach_to=None):
-+                 write_policy=None, cache_mode=None, attach_to=None, stripe_size=0):
-         """
-             :param name: the device name (generally a device node's basename)
-             :type name: str
-@@ -2375,6 +2385,11 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
-                                 be attached to when created
-             :type attach_to: :class:`LVMLogicalVolumeDevice`
- 
-+            For RAID LVs only:
-+
-+                :keyword stripe_size: size of the RAID stripe
-+                :type stripe_size: :class:`~.size.Size`
-+
-         """
- 
-         if isinstance(parents, (list, ParentList)):
-@@ -2395,7 +2410,8 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
-         LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to)
-         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
-                                       fmt, exists, sysfs_path, grow, maxsize,
--                                      percent, cache_request, pvs, from_lvs)
-+                                      percent, cache_request, pvs, from_lvs,
-+                                      stripe_size)
-         LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory,
-                                  write_policy)
-         LVMVDOLogicalVolumeMixin.__init__(self)
-@@ -2661,8 +2677,12 @@ def _create(self):
-             pvs = [spec.pv.path for spec in self._pv_specs]
-             pvs = pvs or None
- 
-+            extra = dict()
-+            if self._stripe_size:
-+                extra["stripesize"] = str(int(self._stripe_size.convert_to("KiB")))
-+
-             blockdev.lvm.lvcreate(self.vg.name, self._name, self.size,
--                                  type=self.seg_type, pv_list=pvs)
-+                                  type=self.seg_type, pv_list=pvs, **extra)
-         else:
-             fast_pvs = [pv.path for pv in self.cache.fast_pvs]
- 
-diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
-index 2cb05f547..e2bc82cd9 100644
---- a/tests/storage_tests/devices_test/lvm_test.py
-+++ b/tests/storage_tests/devices_test/lvm_test.py
-@@ -1,5 +1,6 @@
- import os
- import shutil
-+import subprocess
- 
- from ..storagetestcase import StorageTestCase
- 
-@@ -138,7 +139,7 @@ def test_lvm_thin(self):
-         self.assertTrue(snap.is_snapshot_lv)
-         self.assertEqual(snap.origin, thinlv)
- 
--    def _test_lvm_raid(self, seg_type, raid_level):
-+    def _test_lvm_raid(self, seg_type, raid_level, stripe_size=0):
-         disk1 = self.storage.devicetree.get_device_by_path(self.vdevs[0])
-         self.assertIsNotNone(disk1)
-         self.storage.initialize_disk(disk1)
-@@ -162,7 +163,7 @@ def _test_lvm_raid(self, seg_type, raid_level):
- 
-         raidlv = self.storage.new_lv(fmt_type="ext4", size=blivet.size.Size("50 MiB"),
-                                      parents=[vg], name="blivetTestRAIDLV",
--                                     seg_type=seg_type, pvs=[pv1, pv2])
-+                                     seg_type=seg_type, pvs=[pv1, pv2], stripe_size=stripe_size)
-         self.storage.create_device(raidlv)
- 
-         self.storage.do_it()
-@@ -174,9 +175,16 @@ def _test_lvm_raid(self, seg_type, raid_level):
-         self.assertEqual(raidlv.raid_level, raid_level)
-         self.assertEqual(raidlv.seg_type, seg_type)
- 
-+        if stripe_size:
-+            out = subprocess.check_output(["lvs", "-o", "stripe_size", "--noheadings", "--nosuffix", "--units=b", raidlv.vg.name + "/" + raidlv.lvname])
-+            self.assertEqual(out.decode().strip(), str(int(stripe_size.convert_to())))
-+
-     def test_lvm_raid_raid0(self):
-         self._test_lvm_raid("raid0", blivet.devicelibs.raid.RAID0)
- 
-+    def test_lvm_raid_raid0_stripe_size(self):
-+        self._test_lvm_raid("raid0", blivet.devicelibs.raid.RAID0, stripe_size=blivet.size.Size("1 MiB"))
-+
-     def test_lvm_raid_striped(self):
-         self._test_lvm_raid("striped", blivet.devicelibs.raid.Striped)
- 
-diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
-index 636336c66..83f003c0e 100644
---- a/tests/unit_tests/devices_test/lvm_test.py
-+++ b/tests/unit_tests/devices_test/lvm_test.py
-@@ -363,6 +363,33 @@ def test_lvm_logical_volume_pv_free_cached(self):
-         self.assertEqual(pv.format.free, Size("264 MiB"))
-         self.assertEqual(pv2.format.free, Size("256 MiB"))
- 
-+    def test_lvm_logical_volume_raid_stripe_size(self):
-+        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
-+                           size=Size("1025 MiB"))
-+        pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
-+                            size=Size("513 MiB"))
-+        vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
-+
-+        with self.assertRaises(blivet.errors.DeviceError):
-+            # non-raid LV
-+            lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
-+                                        fmt=blivet.formats.get_format("xfs"),
-+                                        exists=False, stripe_size=Size("1 MiB"))
-+
-+        with self.assertRaises(blivet.errors.DeviceError):
-+            # raid1 LV
-+            lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
-+                                        fmt=blivet.formats.get_format("xfs"),
-+                                        exists=False, seg_type="raid1", pvs=[pv, pv2],
-+                                        stripe_size=Size("1 MiB"))
-+
-+        lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
-+                                    fmt=blivet.formats.get_format("xfs"),
-+                                    exists=False, seg_type="raid0", pvs=[pv, pv2],
-+                                    stripe_size=Size("1 MiB"))
-+
-+        self.assertEqual(lv._stripe_size, Size("1 MiB"))
-+
-     @patch("blivet.formats.fs.Ext4FS.resizable", return_value=True)
-     def test_target_size(self, *args):  # pylint: disable=unused-argument
-         pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
diff --git a/0003-Add-support-for-filesystem-online-resize.patch b/0003-Add-support-for-filesystem-online-resize.patch
deleted file mode 100644
index 977deae..0000000
--- a/0003-Add-support-for-filesystem-online-resize.patch
+++ /dev/null
@@ -1,183 +0,0 @@
-From 3ee2a9da6251179c47ff6c2dc5c969e79f31e9e0 Mon Sep 17 00:00:00 2001
-From: Vojtech Trefny
-Date: Tue, 4 Apr 2023 13:31:40 +0200
-Subject: [PATCH] Add support for filesystem online resize
-
----
- blivet/devices/lvm.py       | 10 ++++++----
- blivet/devices/partition.py | 11 ++++++-----
- blivet/flags.py             |  3 +++
- blivet/formats/fs.py        | 32 ++++++++++++++++++++++++++++----
- blivet/formats/fslib.py     |  7 +++++++
- 5 files changed, 50 insertions(+), 13 deletions(-)
-
-diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
-index 434c359f..04e03e82 100644
---- a/blivet/devices/lvm.py
-+++ b/blivet/devices/lvm.py
-@@ -42,6 +42,7 @@ from .. import errors
- from .. import util
- from ..storage_log import log_method_call
- from .. import udev
-+from ..flags import flags
- from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
- from ..static_data.lvm_info import lvs_info
- from ..tasks import availability
-@@ -2719,10 +2720,11 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
-         # Setup VG parents (in case they are dmraid partitions for example)
-         self.vg.setup_parents(orig=True)
- 
--        if self.original_format.exists:
--            self.original_format.teardown()
--        if self.format.exists:
--            self.format.teardown()
-+        if not flags.allow_online_fs_resize:
-+            if self.original_format.exists:
-+                self.original_format.teardown()
-+            if self.format.exists:
-+                self.format.teardown()
- 
-         udev.settle()
-         blockdev.lvm.lvresize(self.vg.name, self._name, self.size)
-diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
-index e8bd2b7b..5bf3c713 100644
---- a/blivet/devices/partition.py
-+++ b/blivet/devices/partition.py
-@@ -794,11 +794,12 @@ class PartitionDevice(StorageDevice):
-         if not self.exists:
-             raise errors.DeviceError("device has not been created")
- 
--        # don't teardown when resizing luks
--        if self.format.type == "luks" and self.children:
--            self.children[0].format.teardown()
--        else:
--            self.teardown()
-+        if not flags.allow_online_fs_resize:
-+            # don't teardown when resizing luks
-+            if self.format.type == "luks" and self.children:
-+                self.children[0].format.teardown()
-+            else:
-+                self.teardown()
- 
-         if not self.sysfs_path:
-             return
-diff --git a/blivet/flags.py b/blivet/flags.py
-index f0034070..716f0df4 100644
---- a/blivet/flags.py
-+++ b/blivet/flags.py
-@@ -95,6 +95,9 @@ class Flags(object):
-         # https://uapi-group.org/specifications/specs/discoverable_partitions_specification/
-         self.gpt_discoverable_partitions = False
- 
-+        # Allow online filesystem resizes
-+        self.allow_online_fs_resize = False
-+
-     def get_boot_cmdline(self):
-         with open("/proc/cmdline") as f:
-             buf = f.read().strip()
-diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
-index 8df881b8..acc1f9cb 100644
---- a/blivet/formats/fs.py
-+++ b/blivet/formats/fs.py
-@@ -56,7 +56,7 @@ from ..i18n import N_
- from .. import udev
- from ..mounts import mounts_cache
- 
--from .fslib import kernel_filesystems
-+from .fslib import kernel_filesystems, FSResize
- 
- import logging
- log = logging.getLogger("blivet")
-@@ -88,6 +88,9 @@ class FS(DeviceFormat):
-     # value is already unpredictable and can change in the future...
-     _metadata_size_factor = 1.0
- 
-+    # support for resize: grow/shrink, online/offline
-+    _resize_support = 0
-+
-     config_actions_map = {"label": "write_label"}
- 
-     def __init__(self, **kwargs):
-@@ -436,12 +439,27 @@ class FS(DeviceFormat):
-             self.write_uuid()
- 
-     def _pre_resize(self):
--        # file systems need a check before being resized
--        self.do_check()
-+        if self.status:
-+            if flags.allow_online_fs_resize:
-+                if self.target_size > self.size and not self._resize_support & FSResize.ONLINE_GROW:
-+                    raise FSError("This filesystem doesn't support online growing")
-+                if self.target_size < self.size and not self._resize_support & FSResize.ONLINE_SHRINK:
-+                    raise FSError("This filesystem doesn't support online shrinking")
-+            else:
-+                raise FSError("Resizing of mounted filesystems is disabled")
-+
-+        if self.status:
-+            # fsck tools in general don't allow checks on mounted filesystems
-+            log.debug("Filesystem on %s is mounted, not checking", self.device)
-+        else:
-+            # file systems need a check before being resized
-+            self.do_check()
-+
-         super(FS, self)._pre_resize()
- 
-     def _post_resize(self):
--        self.do_check()
-+        if not self.status:
-+            self.do_check()
-         super(FS, self)._post_resize()
- 
-     def do_check(self):
-@@ -838,6 +856,7 @@ class Ext2FS(FS):
-     _formattable = True
-     _supported = True
-     _resizable = True
-+    _resize_support = FSResize.ONLINE_GROW | FSResize.OFFLINE_GROW | FSResize.OFFLINE_SHRINK
-     _linux_native = True
-     _max_size = Size("8 TiB")
-     _dump = True
-@@ -1097,6 +1116,7 @@ class XFS(FS):
-     _linux_native = True
-     _supported = True
-     _resizable = True
-+    _resize_support = FSResize.ONLINE_GROW | FSResize.OFFLINE_GROW
-     _packages = ["xfsprogs"]
-     _fsck_class = fsck.XFSCK
-     _info_class = fsinfo.XFSInfo
-@@ -1247,6 +1267,7 @@ class NTFS(FS):
-     _labelfs = fslabeling.NTFSLabeling()
-     _uuidfs = fsuuid.NTFSUUID()
-     _resizable = True
-+    _resize_support = FSResize.OFFLINE_GROW | FSResize.OFFLINE_SHRINK
-     _formattable = True
-     _supported = True
-     _min_size = Size("1 MiB")
-@@ -1502,6 +1523,9 @@ class TmpFS(NoDevFS):
-         # same, nothing actually needs to be set
-         pass
- 
-+    def _pre_resize(self):
-+        self.do_check()
-+
-     def do_resize(self):
-         # Override superclass method to record whether mount options
-         # should include an explicit size specification.
-diff --git a/blivet/formats/fslib.py b/blivet/formats/fslib.py
-index ea93b1fd..8722e942 100644
---- a/blivet/formats/fslib.py
-+++ b/blivet/formats/fslib.py
-@@ -36,3 +36,10 @@ def update_kernel_filesystems():
- 
- 
- update_kernel_filesystems()
-+
-+
-+class FSResize():
-+    OFFLINE_SHRINK = 1 << 1
-+    OFFLINE_GROW = 1 << 2
-+    ONLINE_SHRINK = 1 << 3
-+    ONLINE_GROW = 1 << 4
--- 
-2.40.1
-
diff --git a/0004-Always-prefer-GPT-disk-labels-on-x86_64-and-clean-up.patch b/0004-Always-prefer-GPT-disk-labels-on-x86_64-and-clean-up.patch
deleted file mode 100644
index 1969e77..0000000
--- a/0004-Always-prefer-GPT-disk-labels-on-x86_64-and-clean-up.patch
+++ /dev/null
@@ -1,98 +0,0 @@
-From 7e8c7adaace58d960763225b459a0fc3739f62ee Mon Sep 17 00:00:00 2001
-From: Adam Williamson
-Date: Fri, 26 May 2023 15:09:49 -0700
-Subject: [PATCH] Always prefer GPT disk labels on x86_64 (and clean up the
- logic)
-
-See: https://bugzilla.redhat.com/show_bug.cgi?id=2092091#c6
-There was a Fedora 37 Change to prefer GPT disk labels on x86_64
-BIOS installs, but for some reason, this was implemented in
-anaconda by having it ignore blivet's ordering of the disk label
-types and just universally prefer GPT if it's in the list at all,
-which resulted in a preference for GPT in more cases than just
-x86_64 BIOS. This is one step towards fixing that, by putting the
-'always prefer GPT on x86_64' logic here in the blivet ordering
-code where it belongs. Step 2 will be to drop the anaconda code
-that overrides blivet's preference order.
-
-This also simplifies the logic a bit; it had gotten rather a lot
-of conditions piled on top of each other and was rather hard to
-read. This should achieve the same effect as before in a clearer
-and more concise way.
-
-Signed-off-by: Adam Williamson
----
- blivet/formats/disklabel.py                        |  7 ++-----
- tests/storage_tests/devices_test/partition_test.py |  2 +-
- tests/unit_tests/formats_tests/disklabel_test.py   | 10 ++++++++++
- 3 files changed, 13 insertions(+), 6 deletions(-)
-
-diff --git a/blivet/formats/disklabel.py b/blivet/formats/disklabel.py
-index 72df9d67..5b4b0a85 100644
---- a/blivet/formats/disklabel.py
-+++ b/blivet/formats/disklabel.py
-@@ -223,11 +223,8 @@ class DiskLabel(DeviceFormat):
-         label_types = ["msdos", "gpt"]
-         if arch.is_pmac():
-             label_types = ["mac"]
--        elif arch.is_aarch64():
--            label_types = ["gpt", "msdos"]
--        elif arch.is_efi() and arch.is_arm():
--            label_types = ["msdos", "gpt"]
--        elif arch.is_efi() and not arch.is_aarch64():
-+        # always prefer gpt on aarch64, x86_64, and EFI plats except 32-bit ARM
-+        elif arch.is_aarch64() or arch.is_x86(bits=64) or (arch.is_efi() and not arch.is_arm()):
-             label_types = ["gpt", "msdos"]
-         elif arch.is_s390():
-             label_types += ["dasd"]
-diff --git a/tests/storage_tests/devices_test/partition_test.py b/tests/storage_tests/devices_test/partition_test.py
-index ba01c801..d3ff78a3 100644
---- a/tests/storage_tests/devices_test/partition_test.py
-+++ b/tests/storage_tests/devices_test/partition_test.py
-@@ -99,7 +99,7 @@ class PartitionDeviceTestCase(unittest.TestCase):
-     def test_min_max_size_alignment(self):
-         with sparsetmpfile("minsizetest", Size("10 MiB")) as disk_file:
-             disk = DiskFile(disk_file)
--            disk.format = get_format("disklabel", device=disk.path)
-+            disk.format = get_format("disklabel", device=disk.path, label_type="msdos")
-             grain_size = Size(disk.format.alignment.grainSize)
-             sector_size = Size(disk.format.parted_device.sectorSize)
-             start = int(grain_size)
-diff --git a/tests/unit_tests/formats_tests/disklabel_test.py b/tests/unit_tests/formats_tests/disklabel_test.py
-index f514a778..a7f5e777 100644
---- a/tests/unit_tests/formats_tests/disklabel_test.py
-+++ b/tests/unit_tests/formats_tests/disklabel_test.py
-@@ -75,6 +75,7 @@ class DiskLabelTestCase(unittest.TestCase):
-         arch.is_aarch64.return_value = False
-         arch.is_arm.return_value = False
-         arch.is_pmac.return_value = False
-+        arch.is_x86.return_value = False
- 
-         self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt"])
- 
-@@ -96,6 +97,14 @@ class DiskLabelTestCase(unittest.TestCase):
-         self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt"])
-         arch.is_arm.return_value = False
- 
-+        # this simulates x86_64
-+        arch.is_x86.return_value = True
-+        self.assertEqual(disklabel_class.get_platform_label_types(), ["gpt", "msdos"])
-+        arch.is_efi.return_value = True
-+        self.assertEqual(disklabel_class.get_platform_label_types(), ["gpt", "msdos"])
-+        arch.is_x86.return_value = False
-+        arch.is_efi.return_value = False
-+
-         arch.is_s390.return_value = True
-         self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt", "dasd"])
-         arch.is_s390.return_value = False
-@@ -138,6 +147,7 @@ class DiskLabelTestCase(unittest.TestCase):
-         arch.is_aarch64.return_value = False
-         arch.is_arm.return_value = False
-         arch.is_pmac.return_value = False
-+        arch.is_x86.return_value = False
- 
-         with mock.patch.object(dl, '_label_type_size_check') as size_check:
-             # size check passes for first type ("msdos")
--- 
-2.40.1
-