6 changes: 6 additions & 0 deletions CHANGELOG.md
@@ -9,6 +9,7 @@
is now a type alias for `GuestRegionContainer<GuestRegionMmap>`).
- \[[#338](https://github.com/rust-vmm/vm-memory/pull/338)\] Make `GuestMemoryAtomic` always implement `Clone`.
- \[[#338](https://github.com/rust-vmm/vm-memory/pull/338)\] Make `GuestAddressSpace` a subtrait of `Clone`.
- \[[#339](https://github.com/rust-vmm/vm-memory/pull/339)\] Add `GuestMemory::get_slices()`

### Changed

Expand All @@ -22,6 +23,7 @@
and `GuestRegionMmap::from_range` to be separate from the error type returned by `GuestRegionCollection` functions.
Change return type of `GuestRegionMmap::new` from `Result` to `Option`.
- \[[#324](https://github.com/rust-vmm/vm-memory/pull/324)\] `GuestMemoryRegion::bitmap()` now returns a `BitmapSlice`. Accessing the full bitmap is now possible only if the type of the memory region is known, for example with `MmapRegion::bitmap()`.
- \[[#339](https://github.com/rust-vmm/vm-memory/pull/339)\] Implement `Bytes::load()` and `Bytes::store()` with `get_slices()` instead of `to_region_addr()`

### Removed

Expand All @@ -33,6 +35,10 @@
The `mmap(2)` syscall itself already validates that offset and length are valid, and trying to replicate this check
in userspace ended up being imperfect.

### Fixed

- \[[#339](https://github.com/rust-vmm/vm-memory/pull/339)\] Fix `Bytes::read()` and `Bytes::write()` not to ignore `try_access()`'s `count` parameter

## \[v0.16.1\]

### Added
4 changes: 3 additions & 1 deletion src/bitmap/mod.rs
@@ -266,7 +266,9 @@ pub(crate) mod tests {
let dirty_len = size_of_val(&val);

let (region, region_addr) = m.to_region_addr(dirty_addr).unwrap();
- let slice = m.get_slice(dirty_addr, dirty_len).unwrap();
+ let mut slices = m.get_slices(dirty_addr, dirty_len);
+ let slice = slices.next().unwrap().unwrap();
+ assert!(slices.next().is_none());

assert!(range_is_clean(&region.bitmap(), 0, region.len() as usize));
assert!(range_is_clean(slice.bitmap(), 0, dirty_len));
125 changes: 113 additions & 12 deletions src/guest_memory.rs
@@ -44,6 +44,8 @@
use std::convert::From;
use std::fs::File;
use std::io;
use std::iter::FusedIterator;
use std::mem::size_of;
use std::ops::{BitAnd, BitOr, Deref};
use std::rc::Rc;
use std::sync::atomic::Ordering;
@@ -455,17 +457,110 @@ pub trait GuestMemory {
.ok_or(Error::InvalidGuestAddress(addr))
.and_then(|(r, addr)| r.get_slice(addr, count))
}

/// Returns an iterator over [`VolatileSlice`](struct.VolatileSlice.html)s, together covering
/// `count` bytes starting at `addr`.
///
/// Iterating in this way is necessary because the given address range may be fragmented across
/// multiple [`GuestMemoryRegion`]s.
///
/// The iterator’s items are wrapped in [`Result`], i.e. errors are reported on individual
/// items. If there is no such error, the cumulative length of all items will be equal to
/// `count`. If `count` is 0, an empty iterator will be returned.
fn get_slices<'a>(
&'a self,
addr: GuestAddress,
count: usize,
) -> GuestMemorySliceIterator<'a, Self> {
GuestMemorySliceIterator {
mem: self,
addr,
count,
}
}
}
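
As a usage sketch (not part of this diff): code that needs to read a byte range which may be fragmented across regions can simply drain the iterator. The helper name `copy_from_guest` is hypothetical, and the sketch assumes this module's `Result`/`Error` aliases and `VolatileSlice::copy_to()`.

```rust
// Hypothetical helper, shown only to illustrate how `get_slices()` is consumed.
fn copy_from_guest<M: GuestMemory + ?Sized>(
    mem: &M,
    addr: GuestAddress,
    count: usize,
) -> Result<Vec<u8>> {
    let mut out = Vec::with_capacity(count);
    for slice in mem.get_slices(addr, count) {
        // Each item is one contiguous chunk inside a single region; an error aborts the copy.
        let slice = slice?;
        let mut chunk = vec![0u8; slice.len()];
        slice.copy_to(&mut chunk[..]);
        out.extend_from_slice(&chunk);
    }
    Ok(out)
}
```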

/// Iterates over [`VolatileSlice`]s that together form a guest memory area.
///
/// Returned by [`GuestMemory::get_slices()`].
#[derive(Debug)]
pub struct GuestMemorySliceIterator<'a, M: GuestMemory + ?Sized> {
/// Underlying memory
mem: &'a M,
/// Next address in the guest memory area
addr: GuestAddress,
/// Remaining bytes in the guest memory area
count: usize,
}

impl<'a, M: GuestMemory + ?Sized> GuestMemorySliceIterator<'a, M> {
/// Helper function for [`<Self as Iterator>::next()`](GuestMemorySliceIterator::next).
///
/// Get the next slice (i.e. the one starting from `self.addr` with a length up to
/// `self.count`) and update the internal state.
///
/// # Safety
///
/// This function does not reset `self.count` to 0 in case of error, i.e. it will not stop
/// iterating on its own. Behavior after an error is ill-defined, so the caller must check the
/// return value and, in case of an error, reset `self.count` to 0.
///
/// (This is why this function exists, so this resetting can be done in a single central
/// location.)
unsafe fn do_next(&mut self) -> Option<Result<VolatileSlice<'a, MS<'a, M>>>> {
if self.count == 0 {
return None;
}

let Some((region, start)) = self.mem.to_region_addr(self.addr) else {
return Some(Err(Error::InvalidGuestAddress(self.addr)));
};

let cap = region.len() - start.raw_value();
let len = std::cmp::min(cap, self.count as GuestUsize);

self.count -= len as usize;
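// Wrapping to address 0 is accepted because it means the slice ends exactly at the top of the
// guest address space; any other wrap-around is reported as `GuestAddressOverflow`.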
self.addr = match self.addr.overflowing_add(len as GuestUsize) {
(x @ GuestAddress(0), _) | (x, false) => x,
(_, true) => return Some(Err(Error::GuestAddressOverflow)),
};

Some(region.get_slice(start, len as usize))
}
}

impl<'a, M: GuestMemory + ?Sized> Iterator for GuestMemorySliceIterator<'a, M> {
type Item = Result<VolatileSlice<'a, MS<'a, M>>>;

fn next(&mut self) -> Option<Self::Item> {
// SAFETY:
// We reset `self.count` to 0 on error
match unsafe { self.do_next() } {
Some(Ok(slice)) => Some(Ok(slice)),
other => {
// On error (or end), reset to 0 so iteration remains stopped
self.count = 0;
other
}
}
}
}

/// This iterator continues to return `None` when exhausted.
///
/// [`<Self as Iterator>::next()`](GuestMemorySliceIterator::next) sets `self.count` to 0 when
/// returning `None`, ensuring that it will only return `None` from that point on.
impl<M: GuestMemory + ?Sized> FusedIterator for GuestMemorySliceIterator<'_, M> {}

impl<T: GuestMemory + ?Sized> Bytes<GuestAddress> for T {
type E = Error;

fn write(&self, buf: &[u8], addr: GuestAddress) -> Result<usize> {
self.try_access(
buf.len(),
addr,
- |offset, _count, caddr, region| -> Result<usize> {
- region.write(&buf[offset..], caddr)
+ |offset, count, caddr, region| -> Result<usize> {
+ region.write(&buf[offset..(offset + count)], caddr)
},
)
}
@@ -474,8 +569,8 @@ impl<T: GuestMemory + ?Sized> Bytes<GuestAddress> for T {
self.try_access(
buf.len(),
addr,
- |offset, _count, caddr, region| -> Result<usize> {
- region.read(&mut buf[offset..], caddr)
+ |offset, count, caddr, region| -> Result<usize> {
+ region.read(&mut buf[offset..(offset + count)], caddr)
},
)
}
@@ -591,17 +686,23 @@ impl<T: GuestMemory + ?Sized> Bytes<GuestAddress> for T {
}

fn store<O: AtomicAccess>(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> {
- // `find_region` should really do what `to_region_addr` is doing right now, except
- // it should keep returning a `Result`.
- self.to_region_addr(addr)
- .ok_or(Error::InvalidGuestAddress(addr))
- .and_then(|(region, region_addr)| region.store(val, region_addr, order))
+ // No need to check past the first iterator item: either it has the size of `O`, in which case
+ // there can be no further items; or it does not, in which case `VolatileSlice::store()` will fail.
+ self.get_slices(addr, size_of::<O>())
+ .next()
+ .unwrap()? // count > 0 never produces an empty iterator
+ .store(val, 0, order)
+ .map_err(Into::into)
}

fn load<O: AtomicAccess>(&self, addr: GuestAddress, order: Ordering) -> Result<O> {
- self.to_region_addr(addr)
- .ok_or(Error::InvalidGuestAddress(addr))
- .and_then(|(region, region_addr)| region.load(region_addr, order))
+ // No need to check past the first iterator item: either it has the size of `O`, in which case
+ // there can be no further items; or it does not, in which case `VolatileSlice::load()` will fail.
+ self.get_slices(addr, size_of::<O>())
+ .next()
+ .unwrap()? // count > 0 never produces an empty iterator
+ .load(0, order)
+ .map_err(Into::into)
}
}
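
A minimal sketch of the behavior the two comments above rely on (hypothetical addresses; `Bytes`, `GuestAddress`, and `Ordering` as already imported in this file): an atomic access whose `size_of::<O>()` bytes do not fit in the first returned slice, e.g. because the range straddles a region boundary, fails inside `store()`/`load()`, so only the first iterator item ever needs to be examined.

```rust
// Illustration only: round-trip a u32 through guest memory using the trait methods above.
// If `addr..addr + 4` crosses a region boundary, the first slice is shorter than 4 bytes
// and both calls return an error instead of silently touching a partial value.
fn atomic_roundtrip<M: GuestMemory + ?Sized>(mem: &M, addr: GuestAddress) -> Result<u32> {
    mem.store(0xdead_beef_u32, addr, Ordering::Relaxed)?;
    mem.load::<u32>(addr, Ordering::Relaxed)
}
```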

50 changes: 50 additions & 0 deletions src/mmap/mod.rs
@@ -624,6 +624,56 @@ mod tests {
assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err());
}

#[test]
fn test_guest_memory_get_slices() {
let start_addr1 = GuestAddress(0);
let start_addr2 = GuestAddress(0x800);
let start_addr3 = GuestAddress(0xc00);
let guest_mem = GuestMemoryMmap::from_ranges(&[
(start_addr1, 0x400),
(start_addr2, 0x400),
(start_addr3, 0x400),
])
.unwrap();

// Same cases as `test_guest_memory_get_slice()`, just with `get_slices()`.
let slice_size = 0x200;
let mut slices = guest_mem.get_slices(GuestAddress(0x100), slice_size);
let slice = slices.next().unwrap().unwrap();
assert!(slices.next().is_none());
assert_eq!(slice.len(), slice_size);

let slice_size = 0x400;
let mut slices = guest_mem.get_slices(GuestAddress(0x800), slice_size);
let slice = slices.next().unwrap().unwrap();
assert!(slices.next().is_none());
assert_eq!(slice.len(), slice_size);

// Empty iterator.
assert!(guest_mem
.get_slices(GuestAddress(0x900), 0)
.next()
.is_none());

// Error cases, wrong size or base address.
let mut slices = guest_mem.get_slices(GuestAddress(0), 0x500);
assert_eq!(slices.next().unwrap().unwrap().len(), 0x400);
assert!(slices.next().unwrap().is_err());
assert!(slices.next().is_none());
let mut slices = guest_mem.get_slices(GuestAddress(0x600), 0x100);
assert!(slices.next().unwrap().is_err());
assert!(slices.next().is_none());
let mut slices = guest_mem.get_slices(GuestAddress(0x1000), 0x100);
assert!(slices.next().unwrap().is_err());
assert!(slices.next().is_none());

// Test fragmented case
let mut slices = guest_mem.get_slices(GuestAddress(0xa00), 0x400);
assert_eq!(slices.next().unwrap().unwrap().len(), 0x200);
assert_eq!(slices.next().unwrap().unwrap().len(), 0x200);
assert!(slices.next().is_none());
}

#[test]
fn test_atomic_accesses() {
let region = GuestRegionMmap::from_range(GuestAddress(0), 0x1000, None).unwrap();