diff --git a/modules/axdma/src/dma.rs b/modules/axdma/src/dma.rs
index a1eae85f76..81ae42c97f 100644
--- a/modules/axdma/src/dma.rs
+++ b/modules/axdma/src/dma.rs
@@ -2,7 +2,10 @@ use core::{alloc::Layout, ptr::NonNull};
 
 use allocator::{AllocError, AllocResult, BaseAllocator, ByteAllocator};
 use axalloc::{DefaultByteAllocator, global_allocator};
-use axhal::{mem::virt_to_phys, paging::MappingFlags};
+use axhal::{
+    mem::virt_to_phys,
+    paging::{MappingFlags, PageSize},
+};
 use kspin::SpinNoIrq;
 use log::{debug, error};
 use memory_addr::{PAGE_SIZE_4K, VirtAddr, va};
@@ -94,7 +97,7 @@ impl DmaAllocator {
         let expand_size = num_pages * PAGE_SIZE_4K;
         axmm::kernel_aspace()
             .lock()
-            .protect(vaddr, expand_size, flags)
+            .protect(vaddr, expand_size, flags, PageSize::Size4K)
             .map_err(|e| {
                 error!("change table flag fail: {e:?}");
                 AllocError::NoMemory
diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs
index f9691ec10f..f25c7d7b53 100644
--- a/modules/axmm/src/aspace.rs
+++ b/modules/axmm/src/aspace.rs
@@ -4,11 +4,12 @@ use axerrno::{AxError, AxResult, ax_err};
 use axhal::mem::phys_to_virt;
 use axhal::paging::{MappingFlags, PageTable, PagingError};
 use memory_addr::{
-    MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned_4k,
+    MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned,
 };
 use memory_set::{MemoryArea, MemorySet};
+use page_table_multiarch::PageSize;
 
-use crate::backend::Backend;
+use crate::backend::{Backend, PageIterWrapper};
 use crate::mapping_err_to_ax_err;
 
 /// The virtual memory address space.
@@ -85,28 +86,84 @@ impl AddrSpace {
         self.pt.clear_copy_range(range.start, range.size());
     }
 
-    fn validate_region(&self, start: VirtAddr, size: usize) -> AxResult {
+    /// The page table hardware can only map address ranges that are page-aligned.
+    /// During the memory region validation in AddrSpace,
+    /// the system enforces address alignment,
+    /// ensuring that all memory operations comply with page boundary requirements.
+    fn validate_region(&self, start: VirtAddr, size: usize, align: PageSize) -> AxResult {
         if !self.contains_range(start, size) {
             return ax_err!(InvalidInput, "address out of range");
         }
-        if !start.is_aligned_4k() || !is_aligned_4k(size) {
+        if !start.is_aligned(align) || !is_aligned(size, align.into()) {
            return ax_err!(InvalidInput, "address not aligned");
         }
         Ok(())
     }
 
-    /// Finds a free area that can accommodate the given size.
+    /// Searches for a contiguous free region in the virtual address space
     ///
-    /// The search starts from the given hint address, and the area should be within the given limit range.
+    /// This function searches for available virtual address space within a specified address range,
+    /// based on the current memory region layout, that satisfies the size and alignment requirements.
     ///
-    /// Returns the start address of the free area. Returns None if no such area is found.
+    /// # Parameters
+    /// - `hint`: Suggested starting address for the search (may be adjusted due to alignment or overlapping regions)
+    /// - `size`: Size of the contiguous address space to allocate (in bytes)
+    /// - `limit`: Boundary of the allowed address range (inclusive of start and end addresses)
+    /// - `align`: Address alignment requirement (e.g., page alignment like 4KB/2MB)
+    ///
+    /// # Return Value
+    /// - `Some(VirtAddr)`: A starting virtual address that meets all requirements was found
+    /// - `None`: No sufficient space was found within the specified range
+    ///
+    /// # Implementation Logic
+    /// 1. Initialize `last_end` to the maximum aligned value between the hint and the start of the limit range
+    /// 2. First pass: handle regions before the hint to determine the initial search position
+    /// 3. Second pass: check gaps between regions:
+    ///    - Skip overlapping and already occupied regions
+    ///    - Check whether the gap between regions satisfies the `size + alignment` requirement
+    /// 4. Finally, verify that the found address is within the specified `limit` range
+    ///
+    /// # Notes
+    /// - Alignment is strictly enforced on candidate addresses (ensured via `align_up`)
+    /// - The region must be fully contained within the `limit` range (`end <= limit.end`)
+    /// - The search may ignore the `hint` if a better space is found in later regions
     pub fn find_free_area(
         &self,
         hint: VirtAddr,
         size: usize,
         limit: VirtAddrRange,
+        align: PageSize,
     ) -> Option<VirtAddr> {
-        self.areas.find_free_area(hint, size, limit)
+        let mut last_end = hint.max(limit.start).align_up(align);
+        for area in self.areas.iter() {
+            if area.end() <= last_end {
+                last_end = last_end.max(area.end().align_up(align));
+            } else {
+                break;
+            }
+        }
+        for area in self.areas.iter() {
+            let area_start = area.start();
+            if area_start < last_end {
+                continue;
+            }
+            if last_end
+                .checked_add(size)
+                .is_some_and(|end| end <= area_start)
+            {
+                return Some(last_end);
+            }
+            last_end = area.end().align_up(align);
+        }
+
+        if last_end
+            .checked_add(size)
+            .is_some_and(|end| end <= limit.end)
+        {
+            Some(last_end)
+        } else {
+            None
+        }
     }
 
     /// Add a new linear mapping.
@@ -123,14 +180,16 @@ impl AddrSpace {
         start_paddr: PhysAddr,
         size: usize,
         flags: MappingFlags,
+        align: PageSize,
     ) -> AxResult {
-        self.validate_region(start_vaddr, size)?;
-        if !start_paddr.is_aligned_4k() {
+        self.validate_region(start_vaddr, size, align)?;
+
+        if !start_paddr.is_aligned(align) {
             return ax_err!(InvalidInput, "address not aligned");
         }
 
         let offset = start_vaddr.as_usize() - start_paddr.as_usize();
-        let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset));
+        let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset, align));
         self.areas
             .map(area, &mut self.pt, false)
             .map_err(mapping_err_to_ax_err)?;
@@ -151,10 +210,11 @@ impl AddrSpace {
         size: usize,
         flags: MappingFlags,
         populate: bool,
+        align: PageSize,
     ) -> AxResult {
-        self.validate_region(start, size)?;
+        self.validate_region(start, size, align)?;
 
-        let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate));
+        let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate, align));
         self.areas
             .map(area, &mut self.pt, false)
             .map_err(mapping_err_to_ax_err)?;
@@ -163,15 +223,15 @@ impl AddrSpace {
     /// Populates the area with physical frames, returning false if the area
     /// contains unmapped area.
-    pub fn populate_area(&mut self, mut start: VirtAddr, size: usize) -> AxResult {
-        self.validate_region(start, size)?;
+    pub fn populate_area(&mut self, mut start: VirtAddr, size: usize, align: PageSize) -> AxResult {
+        self.validate_region(start, size, align)?;
 
         let end = start + size;
         while let Some(area) = self.areas.find(start) {
             let backend = area.backend();
-            if let Backend::Alloc { populate } = backend {
-                if !*populate {
-                    for addr in PageIter4K::new(start, area.end().min(end)).unwrap() {
+            if let Backend::Alloc { populate, align } = *backend {
+                if !populate {
+                    for addr in PageIterWrapper::new(start, area.end().min(end), align).unwrap() {
                         match self.pt.query(addr) {
                             Ok(_) => {}
                             // If the page is not mapped, try map it.
@@ -186,7 +246,7 @@ impl AddrSpace {
                 }
             }
             start = area.end();
-            assert!(start.is_aligned_4k());
+            assert!(start.is_aligned(align));
             if start >= end {
                 break;
             }
@@ -205,7 +265,29 @@ impl AddrSpace {
     /// Returns an error if the address range is out of the address space or not
     /// aligned.
     pub fn unmap(&mut self, start: VirtAddr, size: usize) -> AxResult {
-        self.validate_region(start, size)?;
+        self.validate_region(start, size, PageSize::Size4K)?;
+
+        let end = start + size;
+        for area in self
+            .areas
+            .iter()
+            .skip_while(move |a| a.end() <= start)
+            .take_while(move |a| a.start() < end)
+        {
+            let area_align = match *area.backend() {
+                Backend::Alloc { populate: _, align } => align,
+                Backend::Linear {
+                    pa_va_offset: _,
+                    align,
+                } => align,
+            };
+
+            let unmap_start = start.max(area.start());
+            let unmap_size = end.min(area.end()) - unmap_start;
+            if !unmap_start.is_aligned(area_align) || !is_aligned(unmap_size, area_align.into()) {
+                return ax_err!(InvalidInput, "address not aligned");
+            }
+        }
 
         self.areas
             .unmap(start, size, &mut self.pt)
@@ -215,16 +297,6 @@ impl AddrSpace {
 
     /// To remove user area mappings from address space.
     pub fn unmap_user_areas(&mut self) -> AxResult {
-        for area in self.areas.iter() {
-            assert!(area.start().is_aligned_4k());
-            assert!(area.size() % PAGE_SIZE_4K == 0);
-            assert!(area.flags().contains(MappingFlags::USER));
-            assert!(
-                self.va_range
-                    .contains_range(VirtAddrRange::from_start_size(area.start(), area.size())),
-                "MemorySet contains out-of-va-range area"
-            );
-        }
         self.areas.clear(&mut self.pt).unwrap();
         Ok(())
     }
@@ -241,11 +313,11 @@ impl AddrSpace {
     ///
     /// # Notes
     /// The caller must ensure that the permission of the operation is allowed.
-    fn process_area_data<F>(&self, start: VirtAddr, size: usize, f: F) -> AxResult
+    fn process_area_data<F>(&self, start: VirtAddr, size: usize, align: PageSize, f: F) -> AxResult
     where
         F: FnMut(VirtAddr, usize, usize),
     {
-        Self::process_area_data_with_page_table(&self.pt, &self.va_range, start, size, f)
+        Self::process_area_data_with_page_table(&self.pt, &self.va_range, start, size, align, f)
     }
 
     fn process_area_data_with_page_table<F>(
@@ -253,6 +325,7 @@ impl AddrSpace {
         pt: &PageTable,
         va_range: &VirtAddrRange,
         start: VirtAddr,
         size: usize,
+        align: PageSize,
         mut f: F,
     ) -> AxResult
     where
         F: FnMut(VirtAddr, usize, usize),
     {
@@ -263,8 +336,9 @@ impl AddrSpace {
         }
         let mut cnt = 0;
         // If start is aligned to 4K, start_align_down will be equal to start_align_up.
-        let end_align_up = (start + size).align_up_4k();
-        for vaddr in PageIter4K::new(start.align_down_4k(), end_align_up)
+        let end_align_up = (start + size).align_up(align);
+        let start_addr = start.align_down(align);
+        for vaddr in PageIterWrapper::new(start_addr, end_align_up, align)
             .expect("Failed to create page iterator")
         {
             let (mut paddr, _, _) = pt.query(vaddr).map_err(|_| AxError::BadAddress)?;
@@ -274,9 +348,9 @@ impl AddrSpace {
             if copy_size == 0 {
                 break;
             }
-            if vaddr == start.align_down_4k() && start.align_offset_4k() != 0 {
-                let align_offset = start.align_offset_4k();
-                copy_size = copy_size.min(PAGE_SIZE_4K - align_offset);
+            if vaddr == start.align_down(align) && start.align_offset(align) != 0 {
+                let align_offset = start.align_offset(align);
+                copy_size = copy_size.min(align as usize - align_offset);
                 paddr += align_offset;
             }
             f(phys_to_virt(paddr), cnt, copy_size);
@@ -291,8 +365,8 @@ impl AddrSpace {
     ///
     /// * `start` - The start virtual address to read.
     /// * `buf` - The buffer to store the data.
-    pub fn read(&self, start: VirtAddr, buf: &mut [u8]) -> AxResult {
-        self.process_area_data(start, buf.len(), |src, offset, read_size| unsafe {
+    pub fn read(&self, start: VirtAddr, align: PageSize, buf: &mut [u8]) -> AxResult {
+        self.process_area_data(start, buf.len(), align, |src, offset, read_size| unsafe {
            core::ptr::copy_nonoverlapping(src.as_ptr(), buf.as_mut_ptr().add(offset), read_size);
         })
     }
@@ -303,8 +377,8 @@ impl AddrSpace {
     ///
     /// * `start_vaddr` - The start virtual address to write.
     /// * `buf` - The buffer to write to the address space.
-    pub fn write(&self, start: VirtAddr, buf: &[u8]) -> AxResult {
-        self.process_area_data(start, buf.len(), |dst, offset, write_size| unsafe {
+    pub fn write(&self, start: VirtAddr, align: PageSize, buf: &[u8]) -> AxResult {
+        self.process_area_data(start, buf.len(), align, |dst, offset, write_size| unsafe {
            core::ptr::copy_nonoverlapping(buf.as_ptr().add(offset), dst.as_mut_ptr(), write_size);
         })
     }
@@ -313,9 +387,15 @@ impl AddrSpace {
     ///
     /// Returns an error if the address range is out of the address space or not
     /// aligned.
-    pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult {
+    pub fn protect(
+        &mut self,
+        start: VirtAddr,
+        size: usize,
+        flags: MappingFlags,
+        align: PageSize,
+    ) -> AxResult {
         // Populate the area first, which also checks the address range for us.
-        self.populate_area(start, size)?;
+        self.populate_area(start, size, align)?;
 
         self.areas
             .protect(start, size, |_| Some(flags), &mut self.pt)
diff --git a/modules/axmm/src/backend/alloc.rs b/modules/axmm/src/backend/alloc.rs
index d5ec5787e6..2537e832bf 100644
--- a/modules/axmm/src/backend/alloc.rs
+++ b/modules/axmm/src/backend/alloc.rs
@@ -1,28 +1,68 @@
+use crate::backend::page_iter_wrapper::PageIterWrapper;
 use axalloc::global_allocator;
 use axhal::mem::{phys_to_virt, virt_to_phys};
 use axhal::paging::{MappingFlags, PageSize, PageTable};
-use memory_addr::{PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr};
+use memory_addr::{PAGE_SIZE_4K, PhysAddr, VirtAddr};
 
 use super::Backend;
 
-fn alloc_frame(zeroed: bool) -> Option<PhysAddr> {
-    let vaddr = VirtAddr::from(global_allocator().alloc_pages(1, PAGE_SIZE_4K).ok()?);
+/// Allocates a physical frame, with an option to zero it out.
+///
+/// This function allocates physical memory with the specified alignment and
+/// returns the corresponding physical address. If allocation fails, it returns `None`.
+///
+/// # Parameters
+/// - `zeroed`: If `true`, the allocated memory will be zero-initialized.
+/// - `align`: Alignment requirement for the allocated memory, must be a multiple of 4KiB.
+///
+/// # Returns
+/// - `Some(PhysAddr)`: The physical address if the allocation is successful.
+/// - `None`: Returned if the memory allocation fails.
+///
+/// # Notes
+/// - This function uses the global memory allocator to allocate memory, with the size
+///   determined by the `align` parameter (in page units).
+/// - If `zeroed` is `true`, the function uses `unsafe` operations to zero out the memory.
+/// - The allocated memory must be accessed via its physical address, which requires
+///   conversion using `virt_to_phys`.
+fn alloc_frame(zeroed: bool, align: PageSize) -> Option<PhysAddr> {
+    let page_size: usize = align.into();
+    let num_pages = page_size / PAGE_SIZE_4K;
+    let vaddr = VirtAddr::from(global_allocator().alloc_pages(num_pages, page_size).ok()?);
     if zeroed {
-        unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, PAGE_SIZE_4K) };
+        unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, page_size) };
     }
     let paddr = virt_to_phys(vaddr);
     Some(paddr)
 }
 
-fn dealloc_frame(frame: PhysAddr) {
+/// Frees a physical frame of memory with the specified alignment.
+///
+/// This function converts the given physical address to a virtual address,
+/// and then frees the corresponding memory pages using the global memory allocator.
+/// The size of the memory to be freed is determined by the `align` parameter,
+/// which must be a multiple of 4KiB.
+///
+/// # Parameters
+/// - `frame`: The physical address of the memory to be freed.
+/// - `align`: The alignment requirement for the memory, must be a multiple of 4KiB.
+///
+/// # Notes
+/// - This function assumes that the provided `frame` was allocated using `alloc_frame`,
+///   otherwise undefined behavior may occur.
+/// - If the deallocation fails, the function will call `panic!`. Details about
+///   the failure can be obtained from the global memory allocator’s error messages.
+fn dealloc_frame(frame: PhysAddr, align: PageSize) {
+    let page_size: usize = align.into();
+    let num_pages = page_size / PAGE_SIZE_4K;
     let vaddr = phys_to_virt(frame);
-    global_allocator().dealloc_pages(vaddr.as_usize(), 1);
+    global_allocator().dealloc_pages(vaddr.as_usize(), num_pages);
 }
 
 impl Backend {
     /// Creates a new allocation mapping backend.
-    pub const fn new_alloc(populate: bool) -> Self {
-        Self::Alloc { populate }
+    pub const fn new_alloc(populate: bool, align: PageSize) -> Self {
+        Self::Alloc { populate, align }
     }
 
     pub(crate) fn map_alloc(
@@ -31,6 +71,7 @@ impl Backend {
         flags: MappingFlags,
         pt: &mut PageTable,
         populate: bool,
+        align: PageSize,
     ) -> bool {
         debug!(
             "map_alloc: [{:#x}, {:#x}) {:?} (populate={})",
             start,
             start + size,
             flags,
             populate
         );
         if populate {
             // allocate all possible physical frames for populated mapping.
-            for addr in PageIter4K::new(start, start + size).unwrap() {
-                if let Some(frame) = alloc_frame(true) {
-                    if let Ok(tlb) = pt.map(addr, frame, PageSize::Size4K, flags) {
-                        tlb.ignore(); // TLB flush on map is unnecessary, as there are no outdated mappings.
-                    } else {
-                        return false;
+            if let Some(iter) = PageIterWrapper::new(start, start + size, align) {
+                for addr in iter {
+                    if let Some(frame) = alloc_frame(true, align) {
+                        if let Ok(tlb) = pt.map(addr, frame, align, flags) {
+                            tlb.ignore(); // TLB flush on map is unnecessary, as there are no outdated mappings.
+                        } else {
+                            return false;
+                        }
                     }
                 }
             }
@@ -61,19 +104,19 @@ impl Backend {
         size: usize,
         pt: &mut PageTable,
         _populate: bool,
+        align: PageSize,
     ) -> bool {
         debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size);
-        for addr in PageIter4K::new(start, start + size).unwrap() {
-            if let Ok((frame, page_size, tlb)) = pt.unmap(addr) {
-                // Deallocate the physical frame if there is a mapping in the
-                // page table.
-                if page_size.is_huge() {
-                    return false;
+        if let Some(iter) = PageIterWrapper::new(start, start + size, align) {
+            for addr in iter {
+                if let Ok((frame, _page_size, tlb)) = pt.unmap(addr) {
+                    // Deallocate the physical frame if there is a mapping in the
+                    // page table.
+                    tlb.flush();
+                    dealloc_frame(frame, align);
+                } else {
+                    // Deallocation is needn't if the page is not mapped.
                 }
-                tlb.flush();
-                dealloc_frame(frame);
-            } else {
-                // Deallocation is needn't if the page is not mapped.
             }
         }
         true
@@ -84,14 +127,15 @@ impl Backend {
         orig_flags: MappingFlags,
         pt: &mut PageTable,
         populate: bool,
+        align: PageSize,
     ) -> bool {
         if populate {
             false // Populated mappings should not trigger page faults.
-        } else if let Some(frame) = alloc_frame(true) {
+        } else if let Some(frame) = alloc_frame(true, align) {
             // Allocate a physical frame lazily and map it to the fault address.
             // `vaddr` does not need to be aligned. It will be automatically
             // aligned during `pt.map` regardless of the page size.
-            pt.map(vaddr, frame, PageSize::Size4K, orig_flags)
+            pt.map(vaddr, frame, align, orig_flags)
                 .map(|tlb| tlb.flush())
                 .is_ok()
         } else {
diff --git a/modules/axmm/src/backend/linear.rs b/modules/axmm/src/backend/linear.rs
index e324a1e11e..8cea6dd039 100644
--- a/modules/axmm/src/backend/linear.rs
+++ b/modules/axmm/src/backend/linear.rs
@@ -1,12 +1,16 @@
 use axhal::paging::{MappingFlags, PageTable};
 use memory_addr::{PhysAddr, VirtAddr};
+use page_table_multiarch::PageSize;
 
 use super::Backend;
 
 impl Backend {
     /// Creates a new linear mapping backend.
-    pub const fn new_linear(pa_va_offset: usize) -> Self {
-        Self::Linear { pa_va_offset }
+    pub const fn new_linear(pa_va_offset: usize, align: PageSize) -> Self {
+        Self::Linear {
+            pa_va_offset,
+            align,
+        }
     }
 
     pub(crate) fn map_linear(
@@ -25,7 +29,7 @@ impl Backend {
             va_to_pa(start + size),
             flags
         );
-        pt.map_region(start, va_to_pa, size, flags, false, false)
+        pt.map_region(start, va_to_pa, size, flags, true, false)
             .map(|tlb| tlb.ignore()) // TLB flush on map is unnecessary, as there are no outdated mappings.
             .is_ok()
     }
diff --git a/modules/axmm/src/backend/mod.rs b/modules/axmm/src/backend/mod.rs
index be58b3e59d..378b685fa0 100644
--- a/modules/axmm/src/backend/mod.rs
+++ b/modules/axmm/src/backend/mod.rs
@@ -3,9 +3,12 @@ use axhal::paging::{MappingFlags, PageTable};
 use memory_addr::VirtAddr;
 use memory_set::MappingBackend;
+pub use page_iter_wrapper::PageIterWrapper;
+use page_table_multiarch::PageSize;
 
 mod alloc;
 mod linear;
+mod page_iter_wrapper;
 
 /// A unified enum type for different memory mapping backends.
 ///
@@ -25,6 +28,8 @@ pub enum Backend {
     Linear {
         /// `vaddr - paddr`.
         pa_va_offset: usize,
+        /// Alignment parameters for the starting address and memory range.
+        align: PageSize,
     },
     /// Allocation mapping backend.
     ///
@@ -35,6 +40,8 @@ pub enum Backend {
     Alloc {
         /// Whether to populate the physical frames when creating the mapping.
         populate: bool,
+        /// Alignment parameters for the starting address and memory range.
+        align: PageSize,
     },
 }
 
@@ -44,15 +51,23 @@ impl MappingBackend for Backend {
     type PageTable = PageTable;
 
     fn map(&self, start: VirtAddr, size: usize, flags: MappingFlags, pt: &mut PageTable) -> bool {
         match *self {
-            Self::Linear { pa_va_offset } => Self::map_linear(start, size, flags, pt, pa_va_offset),
-            Self::Alloc { populate } => Self::map_alloc(start, size, flags, pt, populate),
+            Self::Linear {
+                pa_va_offset,
+                align: _,
+            } => Self::map_linear(start, size, flags, pt, pa_va_offset),
+            Self::Alloc { populate, align } => {
+                Self::map_alloc(start, size, flags, pt, populate, align)
+            }
         }
     }
 
     fn unmap(&self, start: VirtAddr, size: usize, pt: &mut PageTable) -> bool {
         match *self {
-            Self::Linear { pa_va_offset } => Self::unmap_linear(start, size, pt, pa_va_offset),
-            Self::Alloc { populate } => Self::unmap_alloc(start, size, pt, populate),
+            Self::Linear {
+                pa_va_offset,
+                align: _,
+            } => Self::unmap_linear(start, size, pt, pa_va_offset),
+            Self::Alloc { populate, align } => Self::unmap_alloc(start, size, pt, populate, align),
         }
     }
 
@@ -79,8 +94,8 @@ impl Backend {
     ) -> bool {
         match *self {
             Self::Linear { .. } => false, // Linear mappings should not trigger page faults.
-            Self::Alloc { populate } => {
-                Self::handle_page_fault_alloc(vaddr, orig_flags, page_table, populate)
+            Self::Alloc { populate, align } => {
+                Self::handle_page_fault_alloc(vaddr, orig_flags, page_table, populate, align)
             }
         }
     }
diff --git a/modules/axmm/src/backend/page_iter_wrapper.rs b/modules/axmm/src/backend/page_iter_wrapper.rs
new file mode 100644
index 0000000000..98b5ac33fb
--- /dev/null
+++ b/modules/axmm/src/backend/page_iter_wrapper.rs
@@ -0,0 +1,88 @@
+//! Memory Page Iterator Wrapper Module
+//!
+//! Provides a unified iteration interface across different page sizes,
+//! supporting address iteration for 4K, 2M, and 1G page sizes.
+//! The design is inspired by the Iterator Wrapper pattern,
+//! using an enum to unify the behavior of iterators for different page sizes.
+
+use memory_addr::{PageIter, PageIter4K, VirtAddr};
+use page_table_multiarch::PageSize;
+
+/// 2MB page size constant (2,097,152 bytes)
+pub const PAGE_SIZE_2M: usize = 0x20_0000;
+
+/// 2MB page iterator type alias
+///
+/// Wraps the `PageIter` struct with a fixed page size of `PAGE_SIZE_2M`
+pub type PageIter2M<P> = PageIter<PAGE_SIZE_2M, P>;
+
+/// 1GB page size constant (1,073,741,824 bytes)
+pub const PAGE_SIZE_1G: usize = 0x4000_0000;
+
+/// 1GB page iterator type alias
+///
+/// Wraps the `PageIter` struct with a fixed page size of `PAGE_SIZE_1G`
+pub type PageIter1G<P> = PageIter<PAGE_SIZE_1G, P>;
+
+/// Page Iterator Wrapper Enum
+///
+/// Unifies the iterator interfaces for different page sizes, enabling transparent
+/// access to address iteration.
+/// The design follows the Iterator Wrapper pattern, eliminating type differences
+/// between iterators of varying page sizes.
+pub enum PageIterWrapper {
+    Size4K(PageIter4K<VirtAddr>),
+    Size2M(PageIter2M<VirtAddr>),
+    Size1G(PageIter1G<VirtAddr>),
+}
+
+impl PageIterWrapper {
+    /// Creates an iterator wrapper instance for the specified page size
+    ///
+    /// # Parameters
+    /// - `start`: Starting virtual address (inclusive)
+    /// - `end`: Ending virtual address (exclusive)
+    /// - `page_size`: Enum type specifying the page size
+    ///
+    /// # Returns
+    /// Returns an `Option` wrapping the iterator instance. Returns `None` if the page size is unsupported.
+    ///
+    /// # Example
+    /// ```
+    // let iter = PageIterWrapper::new(start_addr, end_addr, PageSize::Size2M);
+    /// ```
+    pub fn new(start: VirtAddr, end: VirtAddr, page_size: PageSize) -> Option<Self> {
+        match page_size {
+            PageSize::Size4K => PageIter4K::<VirtAddr>::new(start, end).map(Self::Size4K),
+            PageSize::Size2M => PageIter2M::<VirtAddr>::new(start, end).map(Self::Size2M),
+            PageSize::Size1G => PageIter1G::<VirtAddr>::new(start, end).map(Self::Size1G),
+            _ => None,
+        }
+    }
+}
+
+/// Iterator trait implementation
+///
+/// Unifies address iteration behavior for all three page sizes,
+/// providing a transparent external access interface.
+/// The implementation follows the paginated iterator design pattern,
+/// using an enum to dispatch calls to the underlying iterators.
+impl Iterator for PageIterWrapper {
+    type Item = VirtAddr;
+
+    /// Retrieves the next virtual address
+    ///
+    /// # Returns
+    /// Returns an `Option` wrapping the virtual address. Returns `None` when the iteration is complete.
+    ///
+    /// # Implementation Details
+    /// Based on the current enum variant, the corresponding underlying iterator is called.
+    /// The original behavior of each page size iterator is preserved.
+    fn next(&mut self) -> Option<Self::Item> {
+        match self {
+            Self::Size4K(iter) => iter.next(),
+            Self::Size2M(iter) => iter.next(),
+            Self::Size1G(iter) => iter.next(),
+        }
+    }
+}
diff --git a/modules/axmm/src/lib.rs b/modules/axmm/src/lib.rs
index 1be9043498..ad1db55875 100644
--- a/modules/axmm/src/lib.rs
+++ b/modules/axmm/src/lib.rs
@@ -18,6 +18,7 @@ use kspin::SpinNoIrq;
 use lazyinit::LazyInit;
 use memory_addr::{PhysAddr, va};
 use memory_set::MappingError;
+use page_table_multiarch::PageSize;
 
 static KERNEL_ASPACE: LazyInit<SpinNoIrq<AddrSpace>> = LazyInit::new();
 
@@ -37,7 +38,13 @@ pub fn new_kernel_aspace() -> AxResult<AddrSpace> {
         axconfig::plat::KERNEL_ASPACE_SIZE,
     )?;
     for r in axhal::mem::memory_regions() {
-        aspace.map_linear(phys_to_virt(r.paddr), r.paddr, r.size, r.flags.into())?;
+        aspace.map_linear(
+            phys_to_virt(r.paddr),
+            r.paddr,
+            r.size,
+            r.flags.into(),
+            PageSize::Size4K,
+        )?;
     }
     Ok(aspace)
 }
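Notes on the patch, with illustrative sketches.

The `validate_region` change in modules/axmm/src/aspace.rs reduces to ordinary modular arithmetic on the start address and the length, now parameterised by the requested page size. A minimal standalone model (plain `usize` addresses; the names are illustrative, not the axmm API):

```rust
/// Returns true if both the start address and the size are multiples of
/// the requested page size (4 KiB, 2 MiB or 1 GiB).
fn region_is_aligned(start: usize, size: usize, page_size: usize) -> bool {
    start % page_size == 0 && size % page_size == 0
}

fn main() {
    // 0x20_0000 = 2 MiB: start and size are both 2 MiB-aligned.
    assert!(region_is_aligned(0x8040_0000, 0x40_0000, 0x20_0000));
    // A 4 KiB-aligned start is not enough once 2 MiB pages are requested.
    assert!(!region_is_aligned(0x8040_1000, 0x40_0000, 0x20_0000));
}
```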
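The `find_free_area` rewrite scans the sorted area list and accepts the first aligned gap that can hold `size` bytes, falling back to the tail of the limit range. The same logic, modelled standalone over sorted `(start, end)` pairs instead of `MemoryArea`s (a sketch; power-of-two alignment assumed):

```rust
fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}

/// `areas` must be sorted by start address and non-overlapping.
fn find_free(
    areas: &[(usize, usize)],
    hint: usize,
    size: usize,
    limit: (usize, usize),
    align: usize,
) -> Option<usize> {
    let mut last_end = align_up(hint.max(limit.0), align);
    // First pass: skip areas that end before the starting candidate.
    for &(_, end) in areas {
        if end <= last_end {
            last_end = last_end.max(align_up(end, align));
        } else {
            break;
        }
    }
    // Second pass: check the gap before each remaining area.
    for &(start, end) in areas {
        if start < last_end {
            continue;
        }
        if last_end.checked_add(size).is_some_and(|e| e <= start) {
            return Some(last_end);
        }
        last_end = align_up(end, align);
    }
    // Finally, try the space between the last area and the limit.
    last_end
        .checked_add(size)
        .is_some_and(|e| e <= limit.1)
        .then_some(last_end)
}

fn main() {
    let areas = [(0x1000, 0x3000), (0x8000, 0x9000)];
    // First 0x2000-byte gap at or after the hint, 4 KiB aligned.
    assert_eq!(find_free(&areas, 0x0, 0x2000, (0x0, 0x10000), 0x1000), Some(0x3000));
    // A larger request only fits after the last occupied area.
    assert_eq!(find_free(&areas, 0x4000, 0x6000, (0x0, 0x10000), 0x1000), Some(0x9000));
}
```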
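`AddrSpace::unmap` now checks every overlapped area against that area's own mapping granularity before delegating to `MemorySet::unmap`. A standalone model of the per-area check (the `Area` struct here is hypothetical, not the memory_set type):

```rust
#[derive(Clone, Copy)]
struct Area {
    start: usize,
    end: usize,
    page_size: usize, // the alignment this area was mapped with
}

/// Every area overlapped by [start, start + size) must be cut on its own
/// page-size boundary, otherwise the request is rejected.
fn unmap_range_is_valid(areas: &[Area], start: usize, size: usize) -> bool {
    let end = start + size;
    areas
        .iter()
        .filter(|a| a.end > start && a.start < end)
        .all(|a| {
            let cut_start = start.max(a.start);
            let cut_size = end.min(a.end) - cut_start;
            cut_start % a.page_size == 0 && cut_size % a.page_size == 0
        })
}

fn main() {
    let areas = [
        Area { start: 0x0000_0000, end: 0x0020_0000, page_size: 0x1000 },    // 4 KiB pages
        Area { start: 0x0020_0000, end: 0x0060_0000, page_size: 0x20_0000 }, // 2 MiB pages
    ];
    // Cutting into the 2 MiB area on a 4 KiB boundary is rejected.
    assert!(!unmap_range_is_valid(&areas, 0x001F_F000, 0x2000));
    // Cutting both areas on 2 MiB boundaries is accepted.
    assert!(unmap_range_is_valid(&areas, 0x0000_0000, 0x0040_0000));
}
```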
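In backend/alloc.rs, the number of 4 KiB units requested from the global allocator is derived from the mapping granularity, and the same figure must be used again when the frame is freed. The arithmetic, shown standalone:

```rust
const PAGE_SIZE_4K: usize = 0x1000;

/// Number of 4 KiB units handed to the global allocator per frame.
fn frame_pages(page_size: usize) -> usize {
    page_size / PAGE_SIZE_4K
}

fn main() {
    assert_eq!(frame_pages(0x1000), 1);            // 4 KiB frame
    assert_eq!(frame_pages(0x20_0000), 512);       // 2 MiB frame
    assert_eq!(frame_pages(0x4000_0000), 262_144); // 1 GiB frame
}
```

Note that the second argument of `alloc_pages` in the patch is the byte alignment, so a 2 MiB frame is both 512 pages long and 2 MiB-aligned, which is what `pt.map(addr, frame, align, flags)` requires.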
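The linear backend now calls `pt.map_region(..., true, false)`, i.e. huge mappings are allowed. Conceptually the page table then covers the region greedily with the largest page size whose alignment and remaining length fit; a simplified standalone model of that choice (not the page_table_multiarch implementation):

```rust
const SIZE_4K: usize = 0x1000;
const SIZE_2M: usize = 0x20_0000;
const SIZE_1G: usize = 0x4000_0000;

/// Pick the largest page size usable at `vaddr`/`paddr` for `remaining` bytes.
fn best_page_size(vaddr: usize, paddr: usize, remaining: usize) -> usize {
    for &ps in &[SIZE_1G, SIZE_2M, SIZE_4K] {
        if vaddr % ps == 0 && paddr % ps == 0 && remaining >= ps {
            return ps;
        }
    }
    SIZE_4K
}

fn main() {
    // A 4 MiB, 2 MiB-aligned region is covered by two 2 MiB entries
    // instead of 1024 4 KiB entries.
    let (mut va, mut pa, mut left) = (0xffff_0000_4020_0000_usize, 0x4020_0000_usize, 0x40_0000);
    let mut entries = 0;
    while left > 0 {
        let ps = best_page_size(va, pa, left);
        va += ps;
        pa += ps;
        left -= ps;
        entries += 1;
    }
    assert_eq!(entries, 2);
}
```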
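A usage sketch for the new `PageIterWrapper` (assumes it runs inside the axmm crate, where the wrapper, `memory_addr::va!` and `page_table_multiarch::PageSize` are in scope):

```rust
use memory_addr::va;
use page_table_multiarch::PageSize;

use crate::backend::PageIterWrapper;

fn walk_2m_pages() {
    // Three 2 MiB steps: 0x8000_0000, 0x8020_0000, 0x8040_0000.
    if let Some(iter) = PageIterWrapper::new(va!(0x8000_0000), va!(0x8060_0000), PageSize::Size2M) {
        for page in iter {
            // e.g. map or query `page` here
            let _ = page;
        }
    }
}
```

As in the `_ => None` arm of `PageIterWrapper::new`, an unsupported page size (or an unaligned start/end pair) simply yields `None` rather than panicking.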
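Finally, a sketch of how the widened `AddrSpace` signatures are called after this change. This assumes an ArceOS context where `axmm`, `axerrno`, `axhal::paging` and `memory_addr::va!` are available; the address, size and flags below are made up for illustration:

```rust
use axhal::paging::{MappingFlags, PageSize};
use memory_addr::va;

fn map_with_huge_pages(aspace: &mut axmm::AddrSpace) -> axerrno::AxResult {
    let flags = MappingFlags::READ | MappingFlags::WRITE;
    // A 4 MiB lazily-populated allocation backed by 2 MiB frames:
    // start, size and (on page fault) the allocated frames all follow PageSize::Size2M.
    aspace.map_alloc(va!(0xffff_8000_0040_0000), 0x40_0000, flags, false, PageSize::Size2M)?;
    Ok(())
}
```

Existing 4 KiB callers simply pass `PageSize::Size4K`, exactly as `new_kernel_aspace()` and the axdma `protect` call do in the patch above.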