From 21124af85267ac4745000d8fe3099a1cf5a06ce2 Mon Sep 17 00:00:00 2001
From: Olivier Gayot
Date: Wed, 17 Jul 2024 12:20:38 +0200
Subject: [PATCH] storage: don't suggest use-gap if we can't fit more
 partitions

If a disk has already reached its limit of (primary) partitions,
suggesting a use-gap scenario can only lead to failures. In the same
vein, if a disk only has room for one more partition but we also need
to add an ESP, the scenario would fail to apply too.

We now try to prevent Subiquity from suggesting such bad use-gap
scenarios by guessing the number of primary partitions needed and
comparing it with the limit. Although it isn't a perfect solution (see
below), this change should hopefully address the most common problems.

What makes it difficult to properly suggest scenarios is that we do not
have all the cards in hand with the current API. For instance, the user
can choose to add a recovery partition, which of course influences the
number of partitions required. Sadly, the current API gives us no way
to allow a scenario without a recovery partition while rejecting the
same scenario with one. Capabilities can presumably also influence the
number of required partitions.

LP: #2073378

Signed-off-by: Olivier Gayot
---
 subiquity/server/controllers/filesystem.py      | 17 ++++++
 .../controllers/tests/test_filesystem.py        | 60 +++++++++++++++++--
 2 files changed, 71 insertions(+), 6 deletions(-)

diff --git a/subiquity/server/controllers/filesystem.py b/subiquity/server/controllers/filesystem.py
index 1417eceb9..ed7923ee8 100644
--- a/subiquity/server/controllers/filesystem.py
+++ b/subiquity/server/controllers/filesystem.py
@@ -1166,6 +1166,23 @@ def available_use_gap_scenarios(
                 # Do we return a reason here?
                 continue
 
+            # Now we check if we have enough room for all the primary
+            # partitions. This isn't failproof but should limit the number of
+            # UseGaps scenarios that are suggested but can't be applied because
+            # we don't have enough room for partitions.
+            new_primary_parts = 0
+            if not gap.in_extended:
+                new_primary_parts += 1  # For the rootfs to create.
+            new_primary_parts += boot.get_boot_device_plan(disk).new_partition_count()
+            # In theory, there could be a recovery partition as well. Not sure
+            # how to account for it since we don't know yet if one will be
+            # requested.
+            if new_primary_parts > gaps.remaining_primary_partitions(
+                disk, disk.alignment_data()
+            ):
+                log.error("skipping UseGap: not enough room for primary partitions")
+                continue
+
             capability_info = CapabilityInfo()
             for variation in self._variation_info.values():
                 if variation.is_core_boot_classic():
diff --git a/subiquity/server/controllers/tests/test_filesystem.py b/subiquity/server/controllers/tests/test_filesystem.py
index 15654f30f..4ca8e816a 100644
--- a/subiquity/server/controllers/tests/test_filesystem.py
+++ b/subiquity/server/controllers/tests/test_filesystem.py
@@ -24,7 +24,7 @@
 from curtin.commands.extract import TrivialSourceHandler
 from jsonschema.validators import validator_for
 
-from subiquity.common.filesystem import gaps, labels
+from subiquity.common.filesystem import boot, gaps, labels
 from subiquity.common.filesystem.actions import DeviceAction
 from subiquity.common.types.storage import (
     AddPartitionV2,
@@ -1419,17 +1419,65 @@ async def test_in_use_reformat_and_gap(self, bootloader, ptable):
             GuidedStorageTargetReformat(
                 disk_id=self.disk.id, allowed=default_capabilities
             ),
-            GuidedStorageTargetUseGap(
-                disk_id=self.disk.id,
-                allowed=default_capabilities,
-                gap=labels.for_client(gaps.largest_gap(self.disk)),
-            ),
             GuidedStorageTargetManual(),
         ]
+        # VTOC does not have enough room for an ESP + 3 partitions, presumably.
+        if ptable != "vtoc" or bootloader == Bootloader.NONE:
+            expected.insert(
+                1,
+                GuidedStorageTargetUseGap(
+                    disk_id=self.disk.id,
+                    allowed=default_capabilities,
+                    gap=labels.for_client(gaps.largest_gap(self.disk)),
+                ),
+            )
         resp = await self.fsc.v2_guided_GET()
         self.assertEqual(expected, resp.targets)
         self.assertEqual(ProbeStatus.DONE, resp.status)
 
+    @parameterized.expand(
+        (
+            (1, 4, True, True),
+            (1, 4, False, True),
+            (4, 4, True, False),
+            (4, 4, False, False),
+            (3, 4, False, True),
+            (3, 4, True, False),
+            (127, 128, True, False),
+            (127, 128, False, True),
+        )
+    )
+    async def test_available_use_gap_scenarios(
+        self,
+        existing_primaries: int,
+        max_primaries: int,
+        create_boot_part: bool,
+        expected_scenario: bool,
+    ):
+        await self._setup(Bootloader.NONE, "gpt", fix_bios=True)
+        install_min = self.fsc.calculate_suggested_install_min()
+
+        for _ in range(existing_primaries):
+            make_partition(self.model, self.disk, preserve=True, size=4 << 20)
+
+        p_max_primaries = mock.patch.object(
+            self.fsc.model._partition_alignment_data["gpt"],
+            "primary_part_limit",
+            max_primaries,
+        )
+        if create_boot_part:
+            boot_plan = boot.CreatePartPlan(mock.Mock(), mock.Mock(), mock.Mock())
+        else:
+            boot_plan = boot.NoOpBootPlan()
+        p_boot_plan = mock.patch(
+            "subiquity.server.controllers.filesystem.boot.get_boot_device_plan",
+            return_value=boot_plan,
+        )
+        with p_max_primaries, p_boot_plan:
+            scenarios = self.fsc.available_use_gap_scenarios(install_min)
+
+        self.assertEqual(expected_scenario, scenarios != [])
+
 
 class TestManualBoot(IsolatedAsyncioTestCase):
     def _setup(self, bootloader, ptable, **kw):
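
Note (not part of the patch): below is a minimal standalone sketch of the
counting logic the new guard in available_use_gap_scenarios() implements.
The function name use_gap_fits and its parameters are hypothetical
stand-ins for gap.in_extended,
boot.get_boot_device_plan(disk).new_partition_count() and
gaps.remaining_primary_partitions(disk, disk.alignment_data()) as used in
the diff above.

    def use_gap_fits(gap_in_extended: bool,
                     boot_partition_count: int,
                     remaining_primary: int) -> bool:
        """Best-effort check that the gap can hold the partitions we would add."""
        new_primary_parts = 0
        if not gap_in_extended:
            # The rootfs only consumes a primary slot when the gap is not
            # inside an extended partition.
            new_primary_parts += 1
        # Partitions the bootloader plan would create, e.g. an ESP on UEFI.
        new_primary_parts += boot_partition_count
        # A recovery partition may be requested later; it is not counted here,
        # which is why the check is best-effort rather than failproof.
        return new_primary_parts <= remaining_primary

    # For example, a disk with a single primary slot left but also needing an
    # ESP cannot host a use-gap install, while one needing no boot partition can:
    assert not use_gap_fits(False, 1, 1)
    assert use_gap_fits(False, 0, 1)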