Skip to content

Commit

Permalink
storage: don't suggest use-gap if we can't fit more partitions
Browse files Browse the repository at this point in the history
If a disk has already reached its limit of (primary) partitions,
suggesting a use-gap scenario can only lead to failures. In the same
vein, if a disk only has room for one more partition but we also need to
add an ESP, the scenario would fail to apply too.

We now try to prevent Subiquity from suggesting such bad use-gap
scenarios by guessing the number of primary partitions needed and
comparing it with the limit.

Although it isn't a perfect solution (see below), this change should
hopefully address the most common problems.

What makes it difficult to properly suggest scenarios is that we do not
have all the cards in hand with the current API. For instance, the user
can choose to add a recovery partition, which of course influences the
number of partitions required. Sadly, the current API gives us no way
to allow a scenario without a recovery partition while rejecting the
same scenario with a recovery partition. Capabilities can also
influence the number of required partitions, supposedly.

LP: #2073378

Signed-off-by: Olivier Gayot <[email protected]>
  • Loading branch information
ogayot committed Jul 23, 2024
1 parent 8c557b8 commit 21124af
Show file tree
Hide file tree
Showing 2 changed files with 71 additions and 6 deletions.
17 changes: 17 additions & 0 deletions subiquity/server/controllers/filesystem.py
Original file line number Diff line number Diff line change
Expand Up @@ -1166,6 +1166,23 @@ def available_use_gap_scenarios(
# Do we return a reason here?
continue

# Now we check if we have enough room for all the primary
# partitions. This isn't failproof but should limit the number of
# UseGaps scenarios that are suggested but can't be applied because
# we don't have enough room for partitions.
new_primary_parts = 0
if not gap.in_extended:
new_primary_parts += 1 # For the rootfs to create.
new_primary_parts += boot.get_boot_device_plan(disk).new_partition_count()
# In theory, there could be a recovery partition as well. Not sure
# how to account for it since we don't know yet if one will be
# requested.
if new_primary_parts > gaps.remaining_primary_partitions(
disk, disk.alignment_data()
):
log.error("skipping UseGap: not enough room for primary partitions")
continue

capability_info = CapabilityInfo()
for variation in self._variation_info.values():
if variation.is_core_boot_classic():
Expand Down
60 changes: 54 additions & 6 deletions subiquity/server/controllers/tests/test_filesystem.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
from curtin.commands.extract import TrivialSourceHandler
from jsonschema.validators import validator_for

from subiquity.common.filesystem import gaps, labels
from subiquity.common.filesystem import boot, gaps, labels
from subiquity.common.filesystem.actions import DeviceAction
from subiquity.common.types.storage import (
AddPartitionV2,
Expand Down Expand Up @@ -1419,17 +1419,65 @@ async def test_in_use_reformat_and_gap(self, bootloader, ptable):
GuidedStorageTargetReformat(
disk_id=self.disk.id, allowed=default_capabilities
),
GuidedStorageTargetUseGap(
disk_id=self.disk.id,
allowed=default_capabilities,
gap=labels.for_client(gaps.largest_gap(self.disk)),
),
GuidedStorageTargetManual(),
]
# VTOC does not have enough room for an ESP + 3 partitions, presumably.
if ptable != "vtoc" or bootloader == Bootloader.NONE:
expected.insert(
1,
GuidedStorageTargetUseGap(
disk_id=self.disk.id,
allowed=default_capabilities,
gap=labels.for_client(gaps.largest_gap(self.disk)),
),
)
resp = await self.fsc.v2_guided_GET()
self.assertEqual(expected, resp.targets)
self.assertEqual(ProbeStatus.DONE, resp.status)

@parameterized.expand(
    (
        # (existing_primaries, max_primaries, create_boot_part, expect_scenario)
        (1, 4, True, True),
        (1, 4, False, True),
        (4, 4, True, False),
        (4, 4, False, False),
        (3, 4, False, True),
        (3, 4, True, False),
        (127, 128, True, False),
        (127, 128, False, True),
    )
)
async def test_available_use_gap_scenarios(
    self,
    existing_primaries: int,
    max_primaries: int,
    create_boot_part: bool,
    expected_scenario: bool,
) -> None:
    """Check that a use-gap scenario is only offered when the disk has
    room for every primary partition the guided install would create.

    The scenario needs one primary partition for the rootfs, plus —
    when the boot plan creates a boot partition — presumably one more
    (TODO confirm CreatePartPlan.new_partition_count() == 1 here).
    """
    await self._setup(Bootloader.NONE, "gpt", fix_bios=True)
    install_min = self.fsc.calculate_suggested_install_min()

    # Pre-populate the disk so that only
    # (max_primaries - existing_primaries) primary slots remain.
    for _ in range(existing_primaries):
        make_partition(self.model, self.disk, preserve=True, size=4 << 20)

    # Override the GPT primary-partition limit so both "room left" and
    # "disk full" situations can be exercised without large fixtures.
    p_max_primaries = mock.patch.object(
        self.fsc.model._partition_alignment_data["gpt"],
        "primary_part_limit",
        max_primaries,
    )
    if create_boot_part:
        # A plan that creates an extra (boot) partition.
        boot_plan = boot.CreatePartPlan(mock.Mock(), mock.Mock(), mock.Mock())
    else:
        # A plan that creates no additional partition.
        boot_plan = boot.NoOpBootPlan()
    p_boot_plan = mock.patch(
        "subiquity.server.controllers.filesystem.boot.get_boot_device_plan",
        return_value=boot_plan,
    )
    with p_max_primaries, p_boot_plan:
        scenarios = self.fsc.available_use_gap_scenarios(install_min)

    # expected_scenario means at least one use-gap scenario was offered.
    self.assertEqual(expected_scenario, scenarios != [])


class TestManualBoot(IsolatedAsyncioTestCase):
def _setup(self, bootloader, ptable, **kw):
Expand Down

0 comments on commit 21124af

Please sign in to comment.