From 2543c410d2329d36e2f83d240fef758c932b62a7 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Thu, 15 Feb 2024 12:40:53 -0800 Subject: [PATCH] Add a quickcheck for growing or shrinking existing allocations (#233) * Add a quickcheck for growing or shrinking existing allocations * Bump MSRV to 1.73 To gain access to the `next_multiple_of` and `ilog2` integer methods. --- .github/workflows/rust.yaml | 2 +- README.md | 2 +- src/lib.rs | 49 ++++++++++++++++---- tests/all/allocator_api.rs | 91 +++++++++++++++++++++---------------- tests/all/quickcheck.rs | 50 ++++++++++++++++++-- 5 files changed, 137 insertions(+), 57 deletions(-) diff --git a/.github/workflows/rust.yaml b/.github/workflows/rust.yaml index b981863..c254da5 100644 --- a/.github/workflows/rust.yaml +++ b/.github/workflows/rust.yaml @@ -14,7 +14,7 @@ jobs: build: strategy: matrix: - rust_channel: ["stable", "beta", "nightly", "1.65.0"] + rust_channel: ["stable", "beta", "nightly", "1.73.0"] feature_set: ["--features collections,boxed"] include: - rust_channel: "nightly" diff --git a/README.md b/README.md index 9787bf3..d0da14c 100644 --- a/README.md +++ b/README.md @@ -224,7 +224,7 @@ the unstable nightly`Allocator` API on stable Rust. This means that ### Minimum Supported Rust Version (MSRV) -This crate is guaranteed to compile on stable Rust **1.65** and up. It might +This crate is guaranteed to compile on stable Rust **1.73** and up. It might compile with older versions but that may change in any new patch release. We reserve the right to increment the MSRV on minor releases, however we will diff --git a/src/lib.rs b/src/lib.rs index 1ff980c..b23cfea 100755 --- a/src/lib.rs +++ b/src/lib.rs @@ -406,6 +406,15 @@ unsafe fn dealloc_chunk_list(mut footer: NonNull) { // prevent sending the `Bump` across threads until the borrows end. 
unsafe impl Send for Bump {} +#[inline] +fn is_pointer_aligned_to(pointer: *mut T, align: usize) -> bool { + debug_assert!(align.is_power_of_two()); + + let pointer = pointer as usize; + let pointer_aligned = round_down_to(pointer, align); + pointer == pointer_aligned +} + #[inline] pub(crate) fn round_up_to(n: usize, divisor: usize) -> Option { debug_assert!(divisor > 0); @@ -1708,13 +1717,31 @@ impl Bump { old_layout: Layout, new_layout: Layout, ) -> Result, AllocErr> { + // If the new layout demands greater alignment than the old layout has, + // then either + // + // 1. the pointer happens to satisfy the new layout's alignment, so we + // got lucky and can return the pointer as-is, or + // + // 2. the pointer is not aligned to the new layout's demanded alignment, + // and we are unlucky. + // + // In the case of (2), to successfully "shrink" the allocation, we would + // have to allocate a whole new region for the new layout, without being + // able to free the old region. That is unacceptable, so simply return + // an allocation failure error instead. + if old_layout.align() < new_layout.align() { + if is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()) { + return Ok(ptr); + } else { + return Err(AllocErr); + } + } + + debug_assert!(is_pointer_aligned_to(ptr.as_ptr(), new_layout.align())); + let old_size = old_layout.size(); let new_size = new_layout.size(); - let align_is_compatible = old_layout.align() >= new_layout.align(); - - if !align_is_compatible { - return Err(AllocErr); - } // This is how much space we would *actually* reclaim while satisfying // the requested alignment. @@ -1747,8 +1774,8 @@ impl Bump { // +-----+-----+-----+-----+-----+ // // But we MUST NOT have overlapping ranges because we use - // `copy_nonoverlapping` below! Therefore, we round the - // division up to avoid this issue. + // `copy_nonoverlapping` below! Therefore, we round the division + // up to avoid this issue. 
&& delta >= (old_size + 1) / 2 { let footer = self.current_chunk_footer.get(); @@ -1763,10 +1790,12 @@ impl Bump { // in the `if` condition. ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), new_size); - Ok(new_ptr) - } else { - Ok(ptr) + return Ok(new_ptr); } + + // If this wasn't the last allocation, or shrinking wasn't worth it, + // simply return the old pointer as-is. + Ok(ptr) } #[inline] diff --git a/tests/all/allocator_api.rs b/tests/all/allocator_api.rs index fee8f24..8dfe18d 100644 --- a/tests/all/allocator_api.rs +++ b/tests/all/allocator_api.rs @@ -1,13 +1,12 @@ #![cfg(feature = "allocator_api")] +use crate::quickcheck; +use crate::quickcheck::arbitrary_layout; use bumpalo::Bump; - use std::alloc::{AllocError, Allocator, Layout}; use std::ptr::NonNull; use std::sync::atomic::{AtomicUsize, Ordering::Relaxed}; -use crate::quickcheck; - #[derive(Debug)] struct AllocatorDebug { bump: Bump, @@ -120,21 +119,10 @@ fn allocator_grow_zeroed() { quickcheck! { fn allocator_grow_align_increase(layouts: Vec<(usize, usize)>) -> bool { let mut layouts: Vec<_> = layouts.into_iter().map(|(size, align)| { - const MIN_SIZE: usize = 1; - const MAX_SIZE: usize = 1024; - const MIN_ALIGN: usize = 1; - const MAX_ALIGN: usize = 64; - - let align = usize::min(usize::max(align, MIN_ALIGN), MAX_ALIGN); - let align = usize::next_power_of_two(align); - - let size = usize::min(usize::max(size, MIN_SIZE), MAX_SIZE); - let size = usize::max(size, align); - - Layout::from_size_align(size, align).unwrap() + arbitrary_layout(size, align) }).collect(); - layouts.sort_unstable_by_key(|l| (l.size(), l.align())); + layouts.sort_by_key(|l| (l.size(), l.align())); let b = AllocatorDebug::new(Bump::new()); let mut layout_iter = layouts.into_iter(); @@ -160,23 +148,12 @@ quickcheck! 
{ true } - fn allocator_shrink_align_change(layouts: Vec<(usize, usize)>) -> bool { + fn allocator_shrink_align_change(layouts: Vec<(usize, usize)>) -> () { let mut layouts: Vec<_> = layouts.into_iter().map(|(size, align)| { - const MIN_SIZE: usize = 1; - const MAX_SIZE: usize = 1024; - const MIN_ALIGN: usize = 1; - const MAX_ALIGN: usize = 64; - - let align = usize::min(usize::max(align, MIN_ALIGN), MAX_ALIGN); - let align = usize::next_power_of_two(align); - - let size = usize::min(usize::max(size, MIN_SIZE), MAX_SIZE); - let size = usize::max(size, align); - - Layout::from_size_align(size, align).unwrap() + arbitrary_layout(size, align) }).collect(); - layouts.sort_unstable_by_key(|l| l.size()); + layouts.sort_by_key(|l| l.size()); layouts.reverse(); let b = AllocatorDebug::new(Bump::new()); @@ -184,30 +161,59 @@ quickcheck! { if let Some(initial_layout) = layout_iter.next() { let mut pointer = b.allocate(initial_layout).unwrap(); - if !is_pointer_aligned_to(pointer, initial_layout.align()) { - return false; - } + assert!(is_pointer_aligned_to(pointer, initial_layout.align())); let mut old_layout = initial_layout; for new_layout in layout_iter { let res = unsafe { b.shrink(pointer.cast(), old_layout, new_layout) }; if old_layout.align() < new_layout.align() { - if res.is_ok() { - return false; + match res { + Ok(p) => assert!(is_pointer_aligned_to(p, new_layout.align())), + Err(_) => {} } } else { pointer = res.unwrap(); - if !is_pointer_aligned_to(pointer, new_layout.align()) { - return false; - } + assert!(is_pointer_aligned_to(pointer, new_layout.align())); old_layout = new_layout; } } } + } - true + fn allocator_grow_or_shrink(layouts: Vec<((usize, usize), (usize, usize))>) -> () { + let layouts = layouts + .into_iter() + .map(|((from_size, from_align), (to_size, to_align))| { + let from_layout = arbitrary_layout(from_size, from_align); + let to_layout = arbitrary_layout(to_size, to_align); + (from_layout, to_layout) + }); + + let b = 
AllocatorDebug::new(Bump::new()); + for (from_layout, to_layout) in layouts { + let pointer = b.allocate(from_layout).unwrap(); + assert!(is_pointer_aligned_to(pointer, from_layout.align())); + let pointer = pointer.cast::(); + + let result = if to_layout.size() <= from_layout.size() { + unsafe { b.shrink(pointer, from_layout, to_layout) } + } else { + unsafe { b.grow(pointer, from_layout, to_layout) } + }; + + match result { + Ok(new_pointer) => { + assert!(is_pointer_aligned_to(new_pointer, to_layout.align())); + } + // Bumpalo can return allocation errors in various situations, + // for example if we try to shrink an allocation but also grow + // its alignment in such a way that we cannot satisfy the + // requested alignment, and that is okay. + Err(_) => continue, + } + } } } @@ -223,7 +229,12 @@ fn allocator_shrink_layout_change() { let p4: NonNull = b.allocate(layout_align4).unwrap().cast(); let p16_res = unsafe { b.shrink(p4, layout_align4, layout_align16) }; - assert_eq!(p16_res, Err(AllocError)); + // This could either happen to succeed because `p4` already happened to be + // 16-aligned and could be reused, or `bumpalo` could return an error. + match p16_res { + Ok(p16) => assert!(is_pointer_aligned_to(p16, 16)), + Err(_) => {} + } } fn is_pointer_aligned_to(p: NonNull<[u8]>, align: usize) -> bool { diff --git a/tests/all/quickcheck.rs b/tests/all/quickcheck.rs index b2dabec..1d2d4ab 100644 --- a/tests/all/quickcheck.rs +++ b/tests/all/quickcheck.rs @@ -1,3 +1,8 @@ +use std::alloc::Layout; + +/// A redefinition/wrapper macro of `quickcheck::quickcheck!` that supports +/// limiting the number of test iterations to one when we are running under +/// MIRI. #[macro_export] macro_rules! quickcheck { ( @@ -16,16 +21,21 @@ macro_rules! 
quickcheck { $($code)* } - // Use QUICKCHECK_TESTS from compiletime to surpass miri isolation + let mut qc = ::quickcheck::QuickCheck::new(); + + // Use the `QUICKCHECK_TESTS` environment variable from + // compiletime to avoid violating MIRI's isolation by looking at + // the runtime environment variable. let tests = option_env!("QUICKCHECK_TESTS").and_then(|s| s.parse().ok()); + + // Limit quickcheck tests to a single iteration under MIRI, + // since they are otherwise super slow. #[cfg(miri)] let tests = tests.or(Some(1)); - let mut qc = ::quickcheck::QuickCheck::new(); - if let Some(tests) = tests { - eprintln!("Executing {} quickchecks", tests); - qc = qc.tests(tests) + eprintln!("Executing at most {} quickchecks", tests); + qc = qc.tests(tests); } qc.quickcheck(prop as fn($($arg_ty),*) -> $ret); @@ -33,3 +43,33 @@ macro_rules! quickcheck { )* }; } + +/// Map an arbitrary `x` to a power of 2 that is less than or equal to `max`, +/// but with as little bias as possible (eg rounding `min(x, max)` to the +/// nearest power of 2 is unacceptable because it would majorly bias `max` for +/// small values of `max`). +fn clamp_to_pow2_in_range(x: usize, max: usize) -> usize { + let log_x = max.ilog2() as usize; + if log_x == 0 { + return 1; + } + let divisor = usize::MAX / log_x; + let y = 1_usize << (x / divisor); + assert!(y.is_power_of_two(), "{y} is not a power of two"); + assert!(y <= max, "{y} is larger than {max}"); + y +} + +/// Helper to turn a pair of arbitrary `usize`s into a valid `Layout` of +/// reasonable size for use with quickchecks. +pub fn arbitrary_layout(size: usize, align: usize) -> Layout { + const MAX_ALIGN: usize = 64; + const MAX_SIZE: usize = 1024; + + let align = clamp_to_pow2_in_range(align, MAX_ALIGN); + + let size = size % (MAX_SIZE + 1); + let size = size.next_multiple_of(align); + + Layout::from_size_align(size, align).unwrap() +}