Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 7 additions & 1 deletion service/lib/y2storage/proposal/agama_md_creator.rb
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,13 @@ def create(planned)
devices = md_members(planned)
devices.map(&:remove_descendants)
md.sorted_devices = devices
planned.format!(md) if planned.partitions.empty?
if planned.partitions.empty?
planned.format!(md)
else
# FIXME: This modifies the original planned device object. That looks like the safer
# approach as a hotfix for bsc#1253145.
planned.partitions.each { |p| p.disk = md.name }
end

creator_result.merge!(CreatorResult.new(devicegraph, md.name => planned))

Expand Down
6 changes: 6 additions & 0 deletions service/package/rubygem-agama-yast.changes
Original file line number Diff line number Diff line change
@@ -1,3 +1,9 @@
-------------------------------------------------------------------
Fri Nov 7 15:14:51 UTC 2025 - Ancor Gonzalez Sosa <ancor@suse.com>

- Fixed an error in the calculation of partitions when several
MD RAIDs are created (bsc#1253145).

-------------------------------------------------------------------
Fri Oct 31 13:48:46 UTC 2025 - Ancor Gonzalez Sosa <ancor@suse.com>

Expand Down
2 changes: 2 additions & 0 deletions service/test/agama/storage/autoyast_proposal_test.rb
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,8 @@
# Common setup for every example: mock the storage layer with the scenario's
# devicegraph and stub the architecture object.
before do
  mock_storage(devicegraph: scenario)
  allow(Y2Storage::Arch).to receive(:new).and_return(arch)
  # This environment is enforced by Agama
  allow(Y2Storage::StorageEnv.instance).to receive(:no_bls_bootloader).and_return true
end

let(:scenario) { "windows-linux-pc.yml" }
Expand Down
16 changes: 16 additions & 0 deletions service/test/fixtures/nvme-disks.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
---
# Fixture: four identical, empty 750 GiB NVMe disks.
# Used by tests that create several MD RAIDs on top of them
# (regression coverage for bsc#1253145).
- disk:
    name: "/dev/nvme0n1"
    size: 750 GiB

- disk:
    name: "/dev/nvme1n1"
    size: 750 GiB

- disk:
    name: "/dev/nvme2n1"
    size: 750 GiB

- disk:
    name: "/dev/nvme3n1"
    size: 750 GiB
11 changes: 8 additions & 3 deletions service/test/y2storage/agama_proposal_md_lvm_test.rb
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,7 @@
an_object_having_attributes(name: "/dev/md0", md_level: Y2Storage::MdLevel::RAID1),
an_object_having_attributes(name: "/dev/md1", md_level: Y2Storage::MdLevel::RAID0)
)
md0 = devicegraph.find_by_any_name("/dev/md0")
md0 = devicegraph.md_raids.find { |r| r.md_level.is?(:raid0) }
md0_formatted = md0.partitions.select(&:formatted?)
expect(md0_formatted.map(&:filesystem).map(&:mount_path)).to include "/extra"

Expand All @@ -268,12 +268,17 @@
"/dev/system/root", "/dev/system/swap"
)
pvs_system = vg_system.lvm_pvs.map(&:blk_device)
expect(pvs_system.map(&:name)).to contain_exactly("/dev/md0p2", "/dev/md1p2")
expect(pvs_system.map(&:partitionable).map(&:name))
.to contain_exactly("/dev/md0", "/dev/md1")

vg1 = devicegraph.find_by_any_name("/dev/vg1")
expect(vg1.lvm_lvs.map(&:name)).to contain_exactly("/dev/vg1/home")
pvs_vg1 = vg1.lvm_pvs.map(&:blk_device)
expect(pvs_vg1.map(&:name)).to contain_exactly("/dev/mapper/cr_md1p1")
expect(pvs_vg1.size).to eq 1

pv = pvs_vg1.first
expect(pv.is?(:encryption)).to eq true
expect(pv.blk_device.partitionable).to eq md0
end
end
end
Expand Down
70 changes: 70 additions & 0 deletions service/test/y2storage/agama_proposal_md_test.rb
Original file line number Diff line number Diff line change
Expand Up @@ -372,5 +372,75 @@
expect(md.partitions.first.id).to eq Y2Storage::PartitionId::BIOS_BOOT
end
end

# Two MD RAIDs built from one "raid" partition on each of four NVMe disks,
# each RAID carrying its own set of partitions (bsc#1253145 setup).
context "when creating several MD Raids with several partitions each" do
  let(:scenario) { "nvme-disks.yaml" }

  # Agama storage config: each drive contributes a single member partition,
  # referenced by alias from the mdRaids section below.
  let(:config_json) do
    {
      drives: [
        {
          search: "/dev/nvme0n1",
          partitions: [
            { alias: "nvme0n1-p0", id: "raid", size: { min: "1 MiB" } }
          ]
        },
        {
          search: "/dev/nvme1n1",
          partitions: [
            { alias: "nvme1n1-p0", id: "raid", size: { min: "1 MiB" } }
          ]
        },
        {
          search: "/dev/nvme2n1",
          partitions: [
            { alias: "nvme2n1-p0", id: "raid", size: { min: "1 MiB" } }
          ]
        },
        {
          search: "/dev/nvme3n1",
          partitions: [
            { alias: "nvme3n1-p0", id: "raid", size: { min: "1 MiB" } }
          ]
        }
      ],
      mdRaids: [
        {
          level: "raid0",
          devices: ["nvme0n1-p0", "nvme1n1-p0"],
          partitions: [
            { size: "512 MiB", filesystem: { path: "/boot/efi" } },
            { size: "512 MiB", filesystem: { path: "/boot" } },
            { size: "93 GiB", filesystem: { path: "/" } },
            { size: { min: "1 MiB" }, filesystem: { path: "/data0" } }
          ]
        },
        {
          level: "raid1",
          devices: ["nvme2n1-p0", "nvme3n1-p0"],
          partitions: [
            { size: { min: "1 MiB" }, filesystem: { path: "/data1" } }
          ]
        }
      ],
      # Boot configuration disabled so the proposal does not add extra
      # partitions beyond the ones declared above.
      boot: { configure: false }
    }
  end

  # Regression test for bsc#1253145.
  # Before the fix, the small /boot and /boot/efi partitions were placed on
  # the second RAID to optimize space distribution — something that should
  # obviously not happen.
  it "locates each partition into its corresponding RAID device" do
    devicegraph = proposal.propose

    raid0 = devicegraph.md_raids.find { |i| i.md_level.is?(:raid0) }
    expect(raid0.partitions.map(&:filesystem).map(&:mount_path)).to contain_exactly(
      "/boot", "/boot/efi", "/", "/data0"
    )

    raid1 = devicegraph.md_raids.find { |i| i.md_level.is?(:raid1) }
    expect(raid1.partitions.map(&:filesystem).map(&:mount_path)).to eq ["/data1"]
  end
end
end
end