diff --git a/examples/tour.rs b/examples/tour.rs index f32a9fdc..fbcb9c77 100644 --- a/examples/tour.rs +++ b/examples/tour.rs @@ -119,7 +119,7 @@ are dominant." println!("{:?}", bs.domain()); println!("Show the bits in memory"); for elt in bs.domain() { - println!("{:0w$b} ", elt, w = T::Mem::BITS as usize); + println!("{:0w$b} ", elt, w = ::BITS as usize); } println!(); } diff --git a/src/array.rs b/src/array.rs index 02864a1f..1a8b38d9 100644 --- a/src/array.rs +++ b/src/array.rs @@ -146,7 +146,6 @@ behavior of ordinary arrays `[T; N]` as they stand today. [`.as_bitslice()`]: Self::as_bitslice **/ #[repr(transparent)] -#[derive(Copy)] pub struct BitArray where O: BitOrder, @@ -227,7 +226,7 @@ where /// Views the array as a slice of its underlying memory registers. #[inline] - pub fn as_slice(&self) -> &[V::Store] { + pub fn as_raw_slice(&self) -> &[V::Store] { unsafe { slice::from_raw_parts( &self.data as *const V as *const V::Store, @@ -238,7 +237,7 @@ where /// Views the array as a mutable slice of its underlying memory registers. #[inline] - pub fn as_mut_slice(&mut self) -> &mut [V::Store] { + pub fn as_mut_raw_slice(&mut self) -> &mut [V::Store] { unsafe { slice::from_raw_parts_mut( &mut self.data as *mut V as *mut V::Store, @@ -247,6 +246,22 @@ where } } + #[doc(hidden)] + #[inline(always)] + #[cfg(not(tarpaulin_include))] + #[deprecated = "This is renamed to `as_raw_slice`"] + pub fn as_slice(&self) -> &[V::Store] { + self.as_raw_slice() + } + + #[doc(hidden)] + #[inline(always)] + #[cfg(not(tarpaulin_include))] + #[deprecated = "This is renamed to `as_mut_raw_slice`"] + pub fn as_mut_slice(&mut self) -> &mut [V::Store] { + self.as_mut_raw_slice() + } + /// Views the interior buffer. #[inline(always)] #[cfg(not(tarpaulin_include))] diff --git a/src/array/iter.rs b/src/array/iter.rs index 8d70f1d0..b931b758 100644 --- a/src/array/iter.rs +++ b/src/array/iter.rs @@ -117,7 +117,7 @@ where fn get(&self, index: usize) -> bool { unsafe { self.array - .as_slice() + .as_raw_slice() .pipe(BitPtr::::from_slice) .add(index) .read() diff --git a/src/array/ops.rs b/src/array/ops.rs index 6747bab0..50575458 100644 --- a/src/array/ops.rs +++ b/src/array/ops.rs @@ -164,7 +164,7 @@ where #[inline] fn not(mut self) -> Self::Output { - for elem in self.as_mut_slice() { + for elem in self.as_mut_raw_slice() { elem.store_value(!elem.load_value()); } self diff --git a/src/array/tests.rs b/src/array/tests.rs index 89680f14..12a4956f 100644 --- a/src/array/tests.rs +++ b/src/array/tests.rs @@ -38,9 +38,9 @@ fn wrap_unwrap() { fn views() { let mut arr = bitarr![Msb0, u8; 0; 20]; - let s: &mut [u8] = arr.as_mut_slice(); + let s: &mut [u8] = arr.as_mut_raw_slice(); s[0] = !0u8; - let s: &[u8] = arr.as_slice(); + let s: &[u8] = arr.as_raw_slice(); assert_eq!(s, &[!0, 0, 0]); let a: &mut [u8; 3] = arr.as_mut_buffer(); diff --git a/src/array/traits.rs b/src/array/traits.rs index f154546f..8a9c3d9b 100644 --- a/src/array/traits.rs +++ b/src/array/traits.rs @@ -67,7 +67,9 @@ where #[inline] fn clone(&self) -> Self { let mut out = Self::zeroed(); - for (dst, src) in out.as_mut_slice().iter_mut().zip(self.as_slice()) { + for (dst, src) in + out.as_mut_raw_slice().iter_mut().zip(self.as_raw_slice()) + { dst.store_value(src.load_value()); } out @@ -377,6 +379,13 @@ where } } +impl Copy for BitArray +where + O: BitOrder, + V: BitView + Copy, +{ +} + impl Unpin for BitArray where O: BitOrder, diff --git a/src/boxed.rs b/src/boxed.rs index ac7d2560..1a905f69 100644 --- a/src/boxed.rs +++ b/src/boxed.rs @@ -225,7 
+225,7 @@ where let mut boxed = ManuallyDrop::new(boxed); BitPtr::from_mut_slice(&mut boxed[..]) - .span(boxed.len() * T::Mem::BITS as usize) + .span(boxed.len() * ::BITS as usize) .map(|bitspan| Self { bitspan }) .map_err(|_| ManuallyDrop::into_inner(boxed)) } @@ -438,7 +438,7 @@ where let (_, head, bits) = bp.raw_parts(); let head = head.value() as usize; let tail = head + bits; - let full = crate::mem::elts::(tail) * T::Mem::BITS as usize; + let full = crate::mem::elts::(tail) * ::BITS as usize; unsafe { bp.set_head(BitIdx::ZERO); bp.set_len(full); diff --git a/src/devel.rs b/src/devel.rs index de144451..dd890f90 100644 --- a/src/devel.rs +++ b/src/devel.rs @@ -1,9 +1,17 @@ //! Internal support utilities. -use core::ops::{ - Bound, - Range, - RangeBounds, +use crate::{ + order::BitOrder, + store::BitStore, +}; + +use core::{ + any::TypeId, + ops::{ + Bound, + Range, + RangeBounds, + }, }; /** Normalizes any range into a basic `Range`. @@ -29,6 +37,7 @@ the function, and must be inspected by the caller. `bounds` normalized to an ordinary `Range`, optionally clamped to `end`. **/ +#[inline] pub fn normalize_range(bounds: R, end: usize) -> Range where R: RangeBounds { let min = match bounds.start_bound() { @@ -58,6 +67,7 @@ range end be not greater than the ending marker (if provided). This panics if the range fails a requirement. **/ +#[inline] pub fn assert_range(range: Range, end: impl Into>) { if range.start > range.end { panic!( @@ -75,6 +85,32 @@ pub fn assert_range(range: Range, end: impl Into>) { } } +/// Tests if two `BitOrder` type parameters match each other. +/// +/// This evaluates to a compile-time constant, and is removed during codegen. +#[inline(always)] +pub fn match_order() -> bool +where + O1: BitOrder, + O2: BitOrder, +{ + TypeId::of::() == TypeId::of::() +} + +/// Tests if two `` type parameter pairs match each other. +/// +/// This evaluates to a compile-time constant, and is removed during codegen. +#[inline(always)] +pub fn match_types() -> bool +where + O1: BitOrder, + T1: BitStore, + O2: BitOrder, + T2: BitStore, +{ + match_order::() && TypeId::of::() == TypeId::of::() +} + #[cfg(all(test, feature = "std"))] mod tests { use super::*; diff --git a/src/domain.rs b/src/domain.rs index 3f507ef2..b473dad4 100644 --- a/src/domain.rs +++ b/src/domain.rs @@ -231,7 +231,7 @@ macro_rules! bit_domain { let bitspan = slice.as_bitspan(); let h = bitspan.head(); let (e, t) = h.span(bitspan.len()); - let w = T::Mem::BITS; + let w = ::BITS; match (h.value(), e, t.value()) { (_, 0, _) => Self::empty(), @@ -258,7 +258,7 @@ macro_rules! bit_domain { ) -> Self { let (head, rest) = bit_domain!(split $($m)? slice, - (T::Mem::BITS - head.value()) as usize, + (::BITS - head.value()) as usize, ); let (body, tail) = bit_domain!(split $($m)? rest, @@ -289,7 +289,7 @@ macro_rules! bit_domain { ) -> Self { let (head, rest) = bit_domain!(split $($m)? slice, - (T::Mem::BITS - head.value()) as usize, + (::BITS - head.value()) as usize, ); let (head, body) = ( bit_domain!(retype $($m)? head), @@ -537,7 +537,7 @@ macro_rules! 
domain { let head = bitspan.head(); let elts = bitspan.elements(); let tail = bitspan.tail(); - let bits = T::Mem::BITS; + let bits = ::BITS; let base = bitspan.address().to_const() as *const _; match (head.value(), elts, tail.value()) { (_, 0, _) => Self::empty(), diff --git a/src/field.rs b/src/field.rs index dbd1b898..e3316a76 100644 --- a/src/field.rs +++ b/src/field.rs @@ -407,6 +407,9 @@ pub trait BitField { /// /// # Examples /// + /// This example shows how a value is segmented across multiple storage + /// elements: + /// /// ```rust /// use bitvec::prelude::*; /// @@ -431,6 +434,28 @@ pub trait BitField { /// ); /// ``` /// + /// And this example shows how the same memory region will be read by + /// different `BitOrder` implementors: + /// + /// ```rust + /// use bitvec::prelude::*; + /// + /// // Bit pos: 14 19 16 + /// // Lsb0: ─┤ ├──┤ + /// let arr = [0b0100_0000_0000_0011u16, 0b0001_0000_0000_1110u16]; + /// // Msb0: ├─ ├──┤ + /// // Bit pos: 14 16 19 + /// + /// assert_eq!( + /// arr.view_bits::()[14 .. 20].load_le::(), + /// 0b111001, + /// ); + /// assert_eq!( + /// arr.view_bits::()[14 .. 20].load_le::(), + /// 0b000111, + /// ); + /// ``` + /// /// [`M::BITS`]: crate::mem::BitMemory::BITS /// [`self.len()`]: crate::slice::BitSlice::len fn load_le(&self) -> M @@ -465,6 +490,9 @@ pub trait BitField { /// /// # Examples /// + /// This example shows how a value is segmented across multiple storage + /// elements: + /// /// ```rust /// use bitvec::prelude::*; /// @@ -491,6 +519,27 @@ pub trait BitField { /// ); /// ``` /// + /// And this example shows how the same memory region will be read by + /// different `BitOrder` implementations: + /// + /// ```rust + /// use bitvec::prelude::*; + /// // Bit pos: 14 19 16 + /// // Lsb0: ─┤ ├──┤ + /// let arr = [0b0100_0000_0000_0011u16, 0b0001_0000_0000_1110u16]; + /// // Msb0: ├─ ├──┤ + /// // Bit pos: 14 15 19 + /// + /// assert_eq!( + /// arr.view_bits::()[14 .. 20].load_be::(), + /// 0b011110, + /// ); + /// assert_eq!( + /// arr.view_bits::()[14 .. 
20].load_be::(), + /// 0b110001, + /// ); + /// ``` + /// /// [`M::BITS`]: crate::mem::BitMemory::BITS /// [`self.len()`]: crate::slice::BitSlice::len fn load_be(&self) -> M @@ -527,6 +576,9 @@ pub trait BitField { /// /// # Examples /// + /// This example shows how a value is segmented across multiple storage + /// elements: + /// /// ```rust /// use bitvec::prelude::*; /// @@ -551,6 +603,27 @@ pub trait BitField { /// ); /// ``` /// + /// And this example shows how the same memory region is written by + /// different `BitOrder` implementations: + /// + /// ```rust + /// use bitvec::prelude::*; + /// let mut lsb0 = bitarr![Lsb0, u16; 0; 32]; + /// let mut msb0 = bitarr![Msb0, u16; 0; 32]; + /// + /// // Bit pos: 14 19 16 + /// // Lsb0: ─┤ ├──┤ + /// let exp_lsb0 = [0b0100_0000_0000_0000u16, 0b0000_0000_0000_1110u16]; + /// let exp_msb0 = [0b0000_0000_0000_0011u16, 0b0001_0000_0000_0000u16]; + /// // Msb0: ├─ ├──┤ + /// // Bit pos: 14 15 19 + /// + /// lsb0[14 ..= 19].store_le(0b111001u8); + /// msb0[14 ..= 19].store_le(0b000111u8); + /// assert_eq!(lsb0.as_raw_slice(), exp_lsb0); + /// assert_eq!(msb0.as_raw_slice(), exp_msb0); + /// ``` + /// /// [`M::BITS`]: crate::mem::BitMemory::BITS /// [`self.len()`]: crate::slice::BitSlice::len fn store_le(&mut self, value: M) @@ -587,6 +660,9 @@ pub trait BitField { /// /// # Examples /// + /// This example shows how a value is segmented across multiple storage + /// elements: + /// /// ```rust /// use bitvec::prelude::*; /// @@ -611,6 +687,27 @@ pub trait BitField { /// ); /// ``` /// + /// And this example shows how the same memory region is written by + /// different `BitOrder` implementations: + /// + /// ```rust + /// use bitvec::prelude::*; + /// let mut lsb0 = bitarr![Lsb0, u16; 0; 32]; + /// let mut msb0 = bitarr![Msb0, u16; 0; 32]; + /// + /// // Bit pos: 14 19 16 + /// // Lsb0: ─┤ ├──┤ + /// let exp_lsb0 = [0b0100_0000_0000_0000u16, 0b0000_0000_0000_1110u16]; + /// let exp_msb0 = [0b0000_0000_0000_0011u16, 0b0001_0000_0000_0000u16]; + /// // Msb0: ├─ ├──┤ + /// // Bit pos: 14 15 19 + /// + /// lsb0[14 ..= 19].store_be(0b011110u8); + /// msb0[14 ..= 19].store_be(0b110001u8); + /// assert_eq!(lsb0.as_raw_slice(), exp_lsb0); + /// assert_eq!(msb0.as_raw_slice(), exp_msb0); + /// ``` + /// /// [`M::BITS`]: crate::mem::BitMemory::BITS /// [`self.len()`]: crate::slice::BitSlice::len fn store_be(&mut self, value: M) @@ -701,15 +798,20 @@ where T: BitStore As a const-expression, this branch folds at compile-time to conditionally remove or retain the instruction. */ - if M::BITS > T::Mem::BITS { - accum <<= T::Mem::BITS; + if ::BITS > ::BITS { + accum <<= ::BITS; } accum |= resize::(elem); } if let Some((head, elem)) = head { let shamt = head.value(); - accum <<= T::Mem::BITS - shamt; + if ::BITS > ::BITS - shamt { + accum <<= ::BITS - shamt; + } + else { + accum = M::ZERO; + } accum |= get::(elem, Lsb0::mask(head, None), shamt); } @@ -780,16 +882,20 @@ where T: BitStore } for elem in body.iter().map(BitStore::load_value) { - if M::BITS > T::Mem::BITS { - accum <<= T::Mem::BITS; + if ::BITS > ::BITS { + accum <<= ::BITS; } accum |= resize::(elem); } if let Some((elem, tail)) = tail { - // If the tail is at the limit, then none of the above - // branches entered, and the shift would fail. Clamp to 0. 
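The guards added in this region replace the old masked shift because a shift by the full register width does not clear the accumulator in Rust. A minimal sketch of the clamping pattern, using plain `u8` arithmetic rather than the crate's generic code:

```rust
fn shl_or_zero(accum: u8, shamt: u32) -> u8 {
    // `checked_shl` reports `None` once `shamt >= u8::BITS`; clamping to
    // zero mirrors what the new `else { accum = M::ZERO }` branches do.
    accum.checked_shl(shamt).unwrap_or(0)
}

fn main() {
    assert_eq!(shl_or_zero(0b1111, 3), 0b0111_1000);
    // Shifting by the full width must empty the accumulator rather than
    // silently degrade to a shift by zero.
    assert_eq!(shl_or_zero(0b1111, 8), 0);
}
```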
- accum <<= tail.value() & M::MASK; + let shamt = tail.value(); + if ::BITS > shamt { + accum <<= shamt; + } + else { + accum = M::ZERO; + } accum |= get::(elem, Lsb0::mask(None, tail), 0); } @@ -835,13 +941,18 @@ where T: BitStore if let Some((head, elem)) = head { let shamt = head.value(); set::(elem, value, Lsb0::mask(head, None), shamt); - value >>= T::Mem::BITS - shamt; + if ::BITS > ::BITS - shamt { + value >>= ::BITS - shamt; + } + else { + value = M::ZERO; + } } for elem in body.iter_mut() { elem.store_value(resize(value)); - if M::BITS > T::Mem::BITS { - value >>= T::Mem::BITS; + if ::BITS > ::BITS { + value >>= ::BITS; } } @@ -888,15 +999,19 @@ where T: BitStore DomainMut::Region { head, body, tail } => { if let Some((elem, tail)) = tail { set::(elem, value, Lsb0::mask(None, tail), 0); - // If the tail is at the limit, then none of the below - // branches will enter, and the shift will fail. Clamp to 0 - value >>= tail.value() & M::MASK; + let shamt = tail.value(); + if ::BITS > shamt { + value >>= shamt; + } + else { + value = M::ZERO; + } } for elem in body.iter_mut().rev() { elem.store_value(resize(value)); - if M::BITS > T::Mem::BITS { - value >>= T::Mem::BITS; + if ::BITS > ::BITS { + value >>= ::BITS; } } @@ -969,7 +1084,7 @@ where T: BitStore Domain::Enclave { head, elem, tail } => get::( elem, Msb0::mask(head, tail), - T::Mem::BITS - tail.value(), + ::BITS - tail.value(), ), Domain::Region { head, body, tail } => { let mut accum = M::ZERO; @@ -978,19 +1093,25 @@ where T: BitStore accum = get::( elem, Msb0::mask(None, tail), - T::Mem::BITS - tail.value(), + ::BITS - tail.value(), ); } for elem in body.iter().rev().map(BitStore::load_value) { - if M::BITS > T::Mem::BITS { - accum <<= T::Mem::BITS; + if ::BITS > ::BITS { + accum <<= ::BITS; } accum |= resize::(elem); } if let Some((head, elem)) = head { - accum <<= T::Mem::BITS - head.value(); + let shamt = ::BITS - head.value(); + if ::BITS > shamt { + accum <<= shamt; + } + else { + accum = M::ZERO; + } accum |= get::(elem, Msb0::mask(head, None), 0); } @@ -1052,7 +1173,7 @@ where T: BitStore Domain::Enclave { head, elem, tail } => get::( elem, Msb0::mask(head, tail), - T::Mem::BITS - tail.value(), + ::BITS - tail.value(), ), Domain::Region { head, body, tail } => { let mut accum = M::ZERO; @@ -1062,19 +1183,24 @@ where T: BitStore } for elem in body.iter().map(BitStore::load_value) { - if M::BITS > T::Mem::BITS { - accum <<= T::Mem::BITS; + if ::BITS > ::BITS { + accum <<= ::BITS; } accum |= resize::(elem); } if let Some((elem, tail)) = tail { - let width = tail.value(); - accum <<= width; + let shamt = tail.value(); + if ::BITS > shamt { + accum <<= shamt; + } + else { + accum = M::ZERO; + } accum |= get::( elem, Msb0::mask(None, tail), - T::Mem::BITS - width, + ::BITS - shamt, ); } @@ -1117,18 +1243,24 @@ where T: BitStore elem, value, Msb0::mask(head, tail), - T::Mem::BITS - tail.value(), + ::BITS - tail.value(), ), DomainMut::Region { head, body, tail } => { if let Some((head, elem)) = head { set::(elem, value, Msb0::mask(head, None), 0); - value >>= T::Mem::BITS - head.value(); + let shamt = ::BITS - head.value(); + if ::BITS > shamt { + value >>= shamt; + } + else { + value = M::ZERO; + } } for elem in body.iter_mut() { elem.store_value(resize(value)); - if M::BITS > T::Mem::BITS { - value >>= T::Mem::BITS; + if ::BITS > ::BITS { + value >>= ::BITS; } } @@ -1137,7 +1269,7 @@ where T: BitStore elem, value, Msb0::mask(None, tail), - T::Mem::BITS - tail.value(), + ::BITS - tail.value(), ); } }, @@ -1178,7 +1310,7 @@ 
where T: BitStore elem, value, Msb0::mask(head, tail), - T::Mem::BITS - tail.value(), + ::BITS - tail.value(), ), DomainMut::Region { head, body, tail } => { if let Some((elem, tail)) = tail { @@ -1186,15 +1318,20 @@ where T: BitStore elem, value, Msb0::mask(None, tail), - T::Mem::BITS - tail.value(), + ::BITS - tail.value(), ); - value >>= tail.value(); + if ::BITS > tail.value() { + value >>= tail.value(); + } + else { + value = M::ZERO; + } } for elem in body.iter_mut().rev() { elem.store_value(resize(value)); - if M::BITS > T::Mem::BITS { - value >>= T::Mem::BITS; + if ::BITS > ::BITS { + value >>= ::BITS; } } @@ -1298,11 +1435,11 @@ where /// [`M::BITS`]: crate::mem::BitMemory::BITS fn check(action: &'static str, len: usize) where M: BitMemory { - if !(1 ..= M::BITS as usize).contains(&len) { + if !(1 ..= ::BITS as usize).contains(&len) { panic!( "Cannot {} {} bits from a {}-bit region", action, - M::BITS, + ::BITS, len, ); } diff --git a/src/field/tests.rs b/src/field/tests.rs index 19f6e181..840a0986 100644 --- a/src/field/tests.rs +++ b/src/field/tests.rs @@ -99,6 +99,65 @@ fn byte_fields() { assert_eq!(data.view_bits::()[2 .. 6].load_le::(), 5); } +#[test] +fn narrow_byte_fields() { + let mut data = [0u16; 2]; + + data.view_bits_mut::()[16 .. 24].store_be(0x12u8); + assert_eq!(data, [0x0000, 0x1200]); + assert_eq!(data.view_bits::()[16 .. 24].load_be::(), 0x12); + + data.view_bits_mut::()[8 .. 16].store_be(0x34u8); + assert_eq!(data, [0x0034, 0x1200]); + assert_eq!(data.view_bits::()[8 .. 16].load_be::(), 0x34); + + data.view_bits_mut::()[0 .. 8].store_be(0x56u8); + assert_eq!(data, [0x5634, 0x1200]); + assert_eq!(data.view_bits::()[0 .. 8].load_be::(), 0x56); + + data = [0; 2]; + + data.view_bits_mut::()[16 .. 24].store_le(0x12u8); + assert_eq!(data, [0x0000, 0x1200]); + assert_eq!(data.view_bits::()[16 .. 24].load_le::(), 0x12); + + data.view_bits_mut::()[8 .. 16].store_le(0x34u8); + assert_eq!(data, [0x0034, 0x1200]); + assert_eq!(data.view_bits::()[8 .. 16].load_le::(), 0x34); + + data.view_bits_mut::()[0 .. 8].store_le(0x56u8); + assert_eq!(data, [0x5634, 0x1200]); + assert_eq!(data.view_bits::()[0 .. 8].load_le::(), 0x56); + + data = [0; 2]; + + data.view_bits_mut::()[16 .. 24].store_be(0x12u8); + assert_eq!(data, [0x0000, 0x0012]); + assert_eq!(data.view_bits::()[16 .. 24].load_be::(), 0x12); + + data.view_bits_mut::()[8 .. 16].store_be(0x34u8); + assert_eq!(data, [0x3400, 0x0012]); + assert_eq!(data.view_bits::()[8 .. 16].load_be::(), 0x34); + + data.view_bits_mut::()[0 .. 8].store_be(0x56u8); + assert_eq!(data, [0x3456, 0x0012]); + assert_eq!(data.view_bits::()[0 .. 8].load_be::(), 0x56); + + data = [0; 2]; + + data.view_bits_mut::()[16 .. 24].store_le(0x12u8); + assert_eq!(data, [0x0000, 0x0012]); + assert_eq!(data.view_bits::()[16 .. 24].load_le::(), 0x12); + + data.view_bits_mut::()[8 .. 16].store_le(0x34u8); + assert_eq!(data, [0x3400, 0x0012]); + assert_eq!(data.view_bits::()[8 .. 16].load_le::(), 0x34); + + data.view_bits_mut::()[0 .. 8].store_le(0x56u8); + assert_eq!(data, [0x3456, 0x0012]); + assert_eq!(data.view_bits::()[0 .. 8].load_le::(), 0x56); +} + #[test] fn wide_load() { let mut data = bitarr![Lsb0, u16; 0; 256]; diff --git a/src/index.rs b/src/index.rs index a1b12329..b0bf6575 100644 --- a/src/index.rs +++ b/src/index.rs @@ -40,7 +40,7 @@ operations, and is used to create selection masks [`BitSel`] and [`BitMask`]. 
!*/ use crate::{ - mem::BitRegister, + mem::{BitRegister, BitMemory}, order::BitOrder, }; @@ -141,7 +141,7 @@ where R: BitRegister /// [`Self::LAST`]: Self::LAST /// [`Self::ZERO`]: Self::ZERO pub fn new(value: u8) -> Result> { - if value >= R::BITS { + if value >= ::BITS { return Err(BitIdxError::new(value)); } Ok(unsafe { Self::new_unchecked(value) }) @@ -168,10 +168,10 @@ where R: BitRegister /// [`Self::ZERO`]: Self::ZERO pub unsafe fn new_unchecked(value: u8) -> Self { debug_assert!( - value < R::BITS, + value < ::BITS, "Bit index {} cannot exceed type width {}", value, - R::BITS, + ::BITS, ); Self { idx: value, @@ -209,7 +209,7 @@ where R: BitRegister let next = self.idx + 1; ( unsafe { Self::new_unchecked(next & R::MASK) }, - next == R::BITS, + next == ::BITS, ) } @@ -355,7 +355,7 @@ where R: BitRegister if !ovf { // If `far` is in the origin element, then the jump moves zero // elements and produces `far` as an absolute index directly. - if (0 .. R::BITS as isize).contains(&far) { + if (0 .. ::BITS as isize).contains(&far) { (0, unsafe { Self::new_unchecked(far as u8) }) } /* Otherwise, downshift the bit distance to compute the number of @@ -480,10 +480,10 @@ where R: BitRegister /// Debug builds panic when `value` is a valid index for `R`. pub(crate) fn new(value: u8) -> Self { debug_assert!( - value >= R::BITS, + value >= ::BITS, "Bit index {} is valid for type width {}", value, - R::BITS + ::BITS ); Self { err: value, @@ -516,7 +516,7 @@ where R: BitRegister "The value {} is too large to index into {} ({} bits)", self.err, any::type_name::(), - R::BITS + ::BITS ) } } @@ -576,12 +576,12 @@ impl BitTail where R: BitRegister { /// The inclusive maximum tail within an element `R`. - pub(crate) const LAST: Self = Self { - end: R::BITS, + pub const LAST: Self = Self { + end: ::BITS, _ty: PhantomData, }; /// The inclusive minimum tail within an element `R`. - pub(crate) const ZERO: Self = Self { + pub const ZERO: Self = Self { end: 0, _ty: PhantomData, }; @@ -601,7 +601,7 @@ where R: BitRegister /// [`Self::LAST`]: Self::LAST /// [`Self::ZERO`]: Self::ZERO pub fn new(value: u8) -> Option { - if value > R::BITS { + if value > ::BITS { return None; } Some(unsafe { Self::new_unchecked(value) }) @@ -628,10 +628,10 @@ where R: BitRegister /// [`Self::ZERO`]: Self::ZERO pub(crate) unsafe fn new_unchecked(value: u8) -> Self { debug_assert!( - value <= R::BITS, + value <= ::BITS, "Bit tail {} cannot exceed type width {}", value, - R::BITS, + ::BITS, ); Self { end: value, @@ -709,7 +709,7 @@ where R: BitRegister let val = self.end; let head = val & R::MASK; - let bits_in_head = (R::BITS - head) as usize; + let bits_in_head = (::BITS - head) as usize; if len <= bits_in_head { return (1, unsafe { Self::new_unchecked(head + len as u8) }); @@ -810,7 +810,7 @@ where R: BitRegister /// This returns `Some(value)` when it is in the valid range `0 .. R::BITS`, /// and `None` when it is not. pub fn new(value: u8) -> Option { - if value >= R::BITS { + if value >= ::BITS { return None; } Some(unsafe { Self::new_unchecked(value) }) @@ -835,10 +835,10 @@ where R: BitRegister /// `value`. 
pub unsafe fn new_unchecked(value: u8) -> Self { debug_assert!( - value < R::BITS, + value < ::BITS, "Bit position {} cannot exceed type width {}", value, - R::BITS, + ::BITS, ); Self { pos: value, @@ -994,7 +994,7 @@ where R: BitRegister value.count_ones() == 1, "Selections are required to have exactly one set bit: {:0>1$b}", value, - R::BITS as usize, + ::BITS as usize, ); Self { sel: value } } @@ -1025,7 +1025,7 @@ impl Binary for BitSel where R: BitRegister { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - write!(fmt, "{:0>1$b}", self.sel, R::BITS as usize) + write!(fmt, "{:0>1$b}", self.sel, ::BITS as usize) } } @@ -1165,7 +1165,7 @@ impl Binary for BitMask where R: BitRegister { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - write!(fmt, "{:0>1$b}", self.mask, R::BITS as usize) + write!(fmt, "{:0>1$b}", self.mask, ::BITS as usize) } } diff --git a/src/mem.rs b/src/mem.rs index 3baa1d1b..fbe0f194 100644 --- a/src/mem.rs +++ b/src/mem.rs @@ -44,11 +44,11 @@ pub trait BitMemory: IsUnsigned + seal::Sealed { const BITS: u8 = mem::size_of::() as u8 * 8; /// The number of bits required to store an index in the range `0 .. BITS`. - const INDX: u8 = Self::BITS.trailing_zeros() as u8; + const INDX: u8 = ::BITS.trailing_zeros() as u8; /// A mask over all bits that can be used as an index within the element. /// This is the value with the least significant `INDX`-many bits set high. - const MASK: u8 = Self::BITS - 1; + const MASK: u8 = ::BITS - 1; } /** Description of a processor register. diff --git a/src/order.rs b/src/order.rs index 9738fba4..ddac5500 100644 --- a/src/order.rs +++ b/src/order.rs @@ -30,7 +30,7 @@ use crate::{ BitSel, BitTail, }, - mem::BitRegister, + mem::{BitRegister, BitMemory}, }; /** An ordering over a register. @@ -288,7 +288,7 @@ unsafe impl BitOrder for Lsb0 { upto ); let ct = upto - from; - if ct == R::BITS { + if ct == ::BITS { return BitMask::ALL; } // 1. Set all bits in the mask high @@ -336,7 +336,7 @@ unsafe impl BitOrder for Msb0 { upto ); let ct = upto - from; - if ct == R::BITS { + if ct == ::BITS { return BitMask::ALL; } // 1. Set all bits in the mask high. @@ -447,7 +447,7 @@ where let oname = type_name::(); let mname = type_name::(); - for n in 0 .. R::BITS { + for n in 0 .. ::BITS { // Wrap the counter as an index. let idx = unsafe { BitIdx::::new_unchecked(n) }; @@ -466,14 +466,14 @@ where // If the computed position exceeds the valid range, fail. assert!( - pos.value() < R::BITS, + pos.value() < ::BITS, "Error when verifying the implementation of `BitOrder` for `{}`: \ Index {} produces a bit position ({}) that exceeds the type width \ {}", oname, n, pos.value(), - R::BITS, + ::BITS, ); // Check `O`’s implementation of `select` diff --git a/src/ptr/span.rs b/src/ptr/span.rs index 507cb729..c940185b 100644 --- a/src/ptr/span.rs +++ b/src/ptr/span.rs @@ -470,8 +470,8 @@ where // slices of the original type to merge with `head` and `tail`. let (l, c, r) = body.align_to::(); - let t_bits = T::Mem::BITS as usize; - let u_bits = U::Mem::BITS as usize; + let t_bits = ::BITS as usize; + let u_bits = ::BITS as usize; let l_bits = l.len() * t_bits; let c_bits = c.len() * u_bits; @@ -1009,7 +1009,7 @@ where let (addr_b, head_b, bits_b) = other.raw_parts(); // Since ::BITS is an associated const, the compiler will automatically // replace the entire function with `false` when the types don’t match. 
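Throughout the patch, plain `T::Mem::BITS` / `R::BITS` / `M::BITS` paths are rewritten into the fully-qualified `<T::Mem as BitMemory>::BITS` form, so the `BitMemory` constant is named explicitly rather than resolving through whichever `BITS` item happens to be in scope. A contrived illustration of the ambiguity being avoided (the traits below are placeholders, not `bitvec` items):

```rust
trait ByMemory { const BITS: u8; }
trait ByRegister { const BITS: u8; }

struct Word;
impl ByMemory for Word { const BITS: u8 = 32; }
impl ByRegister for Word { const BITS: u8 = 32; }

fn width<T: ByMemory + ByRegister>() -> u8 {
    // `T::BITS` is rejected with "multiple applicable items in scope";
    // the qualified path states which trait's constant is intended.
    <T as ByMemory>::BITS
}

fn main() {
    assert_eq!(width::<Word>(), 32);
}
```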
- T1::Mem::BITS == T2::Mem::BITS + ::BITS == ::BITS && addr_a.value() == addr_b.value() && head_a.value() == head_b.value() && bits_a == bits_b diff --git a/src/serdes.rs b/src/serdes.rs index 26b324fa..7d1103f5 100644 --- a/src/serdes.rs +++ b/src/serdes.rs @@ -18,6 +18,10 @@ deserialize the [`BitSlice`] format into themselves. If you require de/serialization compatibility between [`BitArray`] and the other structures, please file an issue. +The exact implementation of the `serde` interfaces is considered an internal +detail and is not guaranteed; however, as it is technically public ABI, it will +only be modified in a major release (`0.X.n` to `0.Y.0` or `X.m.n` to `Y.0.0`). + [`BitArray`]: crate::array::BitArray [`BitBox`]: crate::boxed::BitBox [`BitSlice`]: crate::slice::BitSlice @@ -116,20 +120,20 @@ where } } +/// Serializes the interior storage type directly, rather than routing through a +/// dynamic sequence serializer. +#[cfg(not(tarpaulin_include))] impl Serialize for BitArray where O: BitOrder, - V: BitView, - ::Mem: Serialize, + V: BitView + Serialize, { + #[inline] fn serialize(&self, serializer: S) -> Result where S: Serializer { - let ary = self.as_slice(); - let mut state = serializer.serialize_seq(Some(ary.len()))?; - for elem in ary.iter().map(BitStore::load_value) { - state.serialize_element(&elem)?; - } - state.end() + unsafe { core::ptr::read(self) } + .value() + .serialize(serializer) } } @@ -229,7 +233,7 @@ where let bits = cmp::min( bits, data.len() - .saturating_mul(T::Mem::BITS as usize) + .saturating_mul(::BITS as usize) .saturating_sub(head as usize), ); // Assemble a pointer to the start bit, diff --git a/src/slice.rs b/src/slice.rs index 238d61cb..3aae3f17 100644 --- a/src/slice.rs +++ b/src/slice.rs @@ -75,7 +75,6 @@ use crate::{ }; use core::{ - any::TypeId, marker::PhantomData, ops::RangeBounds, ptr, @@ -489,7 +488,7 @@ where /// [`BitView`]: crate::view::BitView /// [`.view_bits::()`]: crate::view::BitView::view_bits pub fn from_element(elem: &T) -> &Self { - unsafe { BitPtr::from_ref(elem).span_unchecked(T::Mem::BITS as usize) } + unsafe { BitPtr::from_ref(elem).span_unchecked(::BITS as usize) } .to_bitslice_ref() } @@ -526,7 +525,7 @@ where /// [`BitView`]: crate::view::BitView /// [`.view_bits_mut::()`]: crate::view::BitView::view_bits_mut pub fn from_element_mut(elem: &mut T) -> &mut Self { - unsafe { BitPtr::from_mut(elem).span_unchecked(T::Mem::BITS as usize) } + unsafe { BitPtr::from_mut(elem).span_unchecked(::BITS as usize) } .to_bitslice_mut() } @@ -574,7 +573,7 @@ where // an inclusive cap. This is also pretty much impossible to hit. 
if elts >= Self::MAX_ELTS { return Err(BitSpanError::TooLong( - elts.saturating_mul(T::Mem::BITS as usize), + elts.saturating_mul(::BITS as usize), )); } Ok(unsafe { Self::from_slice_unchecked(slice) }) @@ -642,7 +641,7 @@ where let elts = slice.len(); if elts >= Self::MAX_ELTS { return Err(BitSpanError::TooLong( - elts.saturating_mul(T::Mem::BITS as usize), + elts.saturating_mul(::BITS as usize), )); } Ok(unsafe { Self::from_slice_unchecked_mut(slice) }) @@ -662,7 +661,7 @@ where /// [`MAX_ELTS`]: Self::MAX_ELTS /// [`::from_slice()`]: Self::from_slice pub unsafe fn from_slice_unchecked(slice: &[T]) -> &Self { - let bits = slice.len().wrapping_mul(T::Mem::BITS as usize); + let bits = slice.len().wrapping_mul(::BITS as usize); BitPtr::from_slice(slice) .span_unchecked(bits) .to_bitslice_ref() @@ -682,7 +681,7 @@ where /// [`MAX_ELTS`]: Self::MAX_ELTS /// [`::from_slice_mut()`]: Self::from_slice_mut pub unsafe fn from_slice_unchecked_mut(slice: &mut [T]) -> &mut Self { - let bits = slice.len().wrapping_mul(T::Mem::BITS as usize); + let bits = slice.len().wrapping_mul(::BITS as usize); BitPtr::from_mut_slice(slice) .span_unchecked(bits) .to_bitslice_mut() @@ -1147,6 +1146,8 @@ where /// assert_eq!(b, c); /// } /// ``` + #[inline(always)] + #[cfg(not(tarpaulin_include))] pub fn iter_ones(&self) -> IterOnes { IterOnes::new(self) } @@ -1172,6 +1173,8 @@ where /// assert_eq!(b, c); /// } /// ``` + #[inline(always)] + #[cfg(not(tarpaulin_include))] pub fn iter_zeros(&self) -> IterZeros { IterZeros::new(self) } @@ -1259,9 +1262,7 @@ where "Cloning between slices requires equal lengths" ); - if TypeId::of::() == TypeId::of::() - && TypeId::of::() == TypeId::of::() - { + if dvl::match_types::() { let that = src as *const _ as *const _; unsafe { self.copy_from_bitslice(&*that); @@ -1416,8 +1417,8 @@ where compiler’s `TypeId` API to inspect the type arguments passed to a monomorphization and select the appropriate codegen for it. We know that control will only enter any of these subsequent blocks when the type - argument to monomorphization matches the guard, rendering the - `transmute` calls type-level noöps. + argument to monomorphization matches the guard, so the pointer casts + become the identity function, which is safe and correct. This is only safe to do in `.copy_from_bitslice()`, not in `.clone_from_bitslice()`, because `BitField`’s behavior will only be @@ -1425,20 +1426,15 @@ where storage type arguments. Mismatches will cause an observed shuffling of sections as `BitField` reïnterprets raw bytes according to the machine register selected. - - Note also that the alias removal in the iterators is safe, because the - loops forbid the chunk references from overlapping their liveness with - each other, and `unalias_mut` has no effect when `T` is already an - aliased type. 
*/ - else if TypeId::of::() == TypeId::of::() { + else if dvl::match_order::() { let this: &mut BitSlice = unsafe { &mut *(self as *mut _ as *mut _) }; let that: &BitSlice = unsafe { &*(src as *const _ as *const _) }; this.sp_copy_from_bitslice(that); } - else if TypeId::of::() == TypeId::of::() { + else if dvl::match_order::() { let this: &mut BitSlice = unsafe { &mut *(self as *mut _ as *mut _) }; let that: &BitSlice = @@ -1446,10 +1442,12 @@ where this.sp_copy_from_bitslice(that); } else { - for (to, from) in unsafe { self.iter_mut().remove_alias() } - .zip(src.iter().by_val()) + for (ptr, from) in + self.as_mut_bitptr_range().zip(src.iter().by_val()) { - to.set(from); + unsafe { + ptr.write(from); + } } } } @@ -1949,11 +1947,11 @@ where /// [`self.len()`]: Self::len pub unsafe fn copy_within_unchecked(&mut self, src: R, dest: usize) where R: RangeBounds { - if TypeId::of::() == TypeId::of::() { + if dvl::match_order::() { let this: &mut BitSlice = &mut *(self as *mut _ as *mut _); this.sp_copy_within_unchecked(src, dest); } - else if TypeId::of::() == TypeId::of::() { + else if dvl::match_order::() { let this: &mut BitSlice = &mut *(self as *mut _ as *mut _); this.sp_copy_within_unchecked(src, dest); } diff --git a/src/slice/iter.rs b/src/slice/iter.rs index 9d7a1f5c..d8af72e0 100644 --- a/src/slice/iter.rs +++ b/src/slice/iter.rs @@ -1,11 +1,16 @@ //! Iterators over `[T]`. use crate::{ + devel as dvl, mutability::{ Const, Mut, }, - order::BitOrder, + order::{ + BitOrder, + Lsb0, + Msb0, + }, ptr::{ BitPtrRange, BitRef, @@ -2258,7 +2263,7 @@ where O: BitOrder, T: BitStore, { - pub(crate) fn new(slice: &'a BitSlice) -> Self { + pub(super) fn new(slice: &'a BitSlice) -> Self { Self { inner: slice, front: 0, @@ -2287,10 +2292,26 @@ where type Item = usize; fn next(&mut self) -> Option { - match self.inner.iter().by_val().position(|b| b) { + let pos = if dvl::match_order::() { + let slice = unsafe { + &*(self.inner as *const _ as *const BitSlice) + }; + slice.sp_iter_ones_first() + } + else if dvl::match_order::() { + let slice = unsafe { + &*(self.inner as *const _ as *const BitSlice) + }; + slice.sp_iter_ones_first() + } + else { + self.inner.iter().by_val().position(|b| b) + }; + + match pos { Some(n) => { // Split on the far side of the found index. This is always - // safe, as split(len) yields (self, empty). + // safe, as split(len) yields `(self, empty)`. 
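The `dvl::match_order` / `dvl::match_types` calls used here dispatch to the `Lsb0`/`Msb0` specializations by comparing `TypeId`s, which monomorphization folds to a constant so the untaken branch is discarded. A stand-alone sketch of the pattern (the names are illustrative, not the crate's internals):

```rust
use core::any::TypeId;

/// Compile-time-foldable check that two `'static` types are identical.
fn match_type<A: 'static, B: 'static>() -> bool {
    TypeId::of::<A>() == TypeId::of::<B>()
}

fn describe<T: 'static>() -> &'static str {
    // For any concrete `T` the comparison is a constant, so codegen keeps
    // only one of the two arms.
    if match_type::<T, u8>() {
        "specialized byte path"
    } else {
        "generic fallback"
    }
}

fn main() {
    assert_eq!(describe::<u8>(), "specialized byte path");
    assert_eq!(describe::<u32>(), "generic fallback");
}
```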
let (_, rest) = unsafe { self.inner.split_at_unchecked(n + 1) }; self.inner = rest; let out = self.front + n; @@ -2325,7 +2346,23 @@ where T: BitStore, { fn next_back(&mut self) -> Option { - match self.inner.iter().by_val().rposition(|b| b) { + let pos = if dvl::match_order::() { + let slice = unsafe { + &*(self.inner as *const _ as *const BitSlice) + }; + slice.sp_iter_ones_last() + } + else if dvl::match_order::() { + let slice = unsafe { + &*(self.inner as *const _ as *const BitSlice) + }; + slice.sp_iter_ones_last() + } + else { + self.inner.iter().by_val().rposition(|b| b) + }; + + match pos { Some(n) => { let (rest, _) = unsafe { self.inner.split_at_unchecked(n) }; self.inner = rest; @@ -2380,7 +2417,7 @@ where O: BitOrder, T: BitStore, { - pub(crate) fn new(slice: &'a BitSlice) -> Self { + pub(super) fn new(slice: &'a BitSlice) -> Self { Self { inner: slice, front: 0, @@ -2409,7 +2446,23 @@ where type Item = usize; fn next(&mut self) -> Option { - match self.inner.iter().by_val().position(|b| !b) { + let pos = if dvl::match_order::() { + let slice = unsafe { + &*(self.inner as *const _ as *const BitSlice) + }; + slice.sp_iter_zeros_first() + } + else if dvl::match_order::() { + let slice = unsafe { + &*(self.inner as *const _ as *const BitSlice) + }; + slice.sp_iter_zeros_first() + } + else { + self.inner.iter().by_val().position(|b| !b) + }; + + match pos { Some(n) => { let (_, rest) = unsafe { self.inner.split_at_unchecked(n + 1) }; self.inner = rest; @@ -2444,7 +2497,23 @@ where T: BitStore, { fn next_back(&mut self) -> Option { - match self.inner.iter().by_val().rposition(|b| !b) { + let pos = if dvl::match_order::() { + let slice = unsafe { + &*(self.inner as *const _ as *const BitSlice) + }; + slice.sp_iter_zeros_last() + } + else if dvl::match_order::() { + let slice = unsafe { + &*(self.inner as *const _ as *const BitSlice) + }; + slice.sp_iter_zeros_last() + } + else { + self.inner.iter().by_val().rposition(|b| !b) + }; + + match pos { Some(n) => { let (rest, _) = unsafe { self.inner.split_at_unchecked(n) }; self.inner = rest; diff --git a/src/slice/specialization.rs b/src/slice/specialization.rs index ac6f9694..db25c31b 100644 --- a/src/slice/specialization.rs +++ b/src/slice/specialization.rs @@ -10,9 +10,11 @@ and transmute generic slices into slices with concrete type arguments applied. use crate::{ devel as dvl, + domain::Domain, field::BitField, mem::BitMemory, order::{ + BitOrder, Lsb0, Msb0, }, @@ -22,6 +24,8 @@ use crate::{ use core::ops::RangeBounds; +use funty::IsInteger; + /** Order-specialized function implementations. These functions use [`BitField`] to provide batched load/store behavior. @@ -114,6 +118,200 @@ where T: BitStore .zip(other.chunks(chunk_size)) .all(|(a, b)| a.load_le::() == b.load_le::()) } + + /// Seeks the index of the first `1` bit in the bit-slice. 
+ pub(crate) fn sp_iter_ones_first(&self) -> Option { + let mut accum = 0; + + match self.domain() { + Domain::Enclave { head, elem, tail } => { + let val = (Lsb0::mask(head, tail) & elem.load_value()).value(); + if val != T::Mem::ZERO { + accum += + val.trailing_zeros() as usize - head.value() as usize; + return Some(accum); + } + None + }, + Domain::Region { head, body, tail } => { + if let Some((head, elem)) = head { + let val = + (Lsb0::mask(head, None) & elem.load_value()).value(); + accum += + val.trailing_zeros() as usize - head.value() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + } + + for elem in body { + let val = elem.load_value(); + accum += val.trailing_zeros() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + } + + if let Some((elem, tail)) = tail { + let val = + (Lsb0::mask(None, tail) & elem.load_value()).value(); + if val != T::Mem::ZERO { + accum += val.trailing_zeros() as usize; + return Some(accum); + } + } + + None + }, + } + } + + /// Seeks the index of the last `1` bit in the bit-slice. + pub(crate) fn sp_iter_ones_last(&self) -> Option { + let mut out = match self.len() { + 0 => return None, + n => n - 1, + }; + match self.domain() { + Domain::Enclave { head, elem, tail } => { + let val = (Lsb0::mask(head, tail) & elem.load_value()).value(); + let dead_bits = ::BITS - tail.value(); + if val != T::Mem::ZERO { + out -= val.leading_zeros() as usize - dead_bits as usize; + return Some(out); + } + None + }, + Domain::Region { head, body, tail } => { + if let Some((elem, tail)) = tail { + let val = + (Lsb0::mask(None, tail) & elem.load_value()).value(); + let dead_bits = + ::BITS as usize - tail.value() as usize; + out -= val.leading_zeros() as usize - dead_bits; + if val != T::Mem::ZERO { + return Some(out); + } + } + + for elem in body.iter().rev() { + let val = elem.load_value(); + out -= val.leading_zeros() as usize; + if val != T::Mem::ZERO { + return Some(out); + } + } + + if let Some((head, elem)) = head { + let val = + (Lsb0::mask(head, None) & elem.load_value()).value(); + if val != T::Mem::ZERO { + out -= val.leading_zeros() as usize; + return Some(out); + } + } + + None + }, + } + } + + /// Seeks the index of the first `0` bit in the bit-slice. + pub(crate) fn sp_iter_zeros_first(&self) -> Option { + let mut accum = 0; + + match self.domain() { + Domain::Enclave { head, elem, tail } => { + // Load, invert, then mask and search for `1`. + let val = (Lsb0::mask(head, tail) & !elem.load_value()).value(); + accum += val.trailing_zeros() as usize - head.value() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + None + }, + Domain::Region { head, body, tail } => { + if let Some((head, elem)) = head { + let val = + (Lsb0::mask(head, None) & !elem.load_value()).value(); + accum += + val.trailing_zeros() as usize - head.value() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + } + + for elem in body { + let val = !elem.load_value(); + accum += val.trailing_zeros() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + } + + if let Some((elem, tail)) = tail { + let val = + (Lsb0::mask(None, tail) & !elem.load_value()).value(); + accum += val.trailing_zeros() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + } + + None + }, + } + } + + /// Seeks the index of the last `0` bit in the bit-slice. 
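These `Lsb0` searches probe one storage element at a time and use `trailing_zeros` to locate the bit inside the first non-zero element. A simplified version of the forward search, over plain `u8` words with no head/tail masking:

```rust
/// Index of the first `1` bit, counting from the least significant bit of
/// each byte (Lsb0-style traversal).
fn first_one(words: &[u8]) -> Option<usize> {
    let mut base = 0;
    for &word in words {
        if word != 0 {
            // `trailing_zeros` is the offset of the lowest set bit.
            return Some(base + word.trailing_zeros() as usize);
        }
        base += u8::BITS as usize;
    }
    None
}

fn main() {
    assert_eq!(first_one(&[0x00, 0x20]), Some(13));
    assert_eq!(first_one(&[0x00, 0x00]), None);
}
```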
+ pub(crate) fn sp_iter_zeros_last(&self) -> Option { + let mut out = match self.len() { + 0 => return None, + n => n - 1, + }; + match self.domain() { + Domain::Enclave { head, elem, tail } => { + let val = (Lsb0::mask(head, tail) & !elem.load_value()).value(); + let dead_bits = ::BITS - tail.value(); + if val != T::Mem::ZERO { + out -= val.leading_zeros() as usize - dead_bits as usize; + return Some(out); + } + None + }, + Domain::Region { head, body, tail } => { + if let Some((elem, tail)) = tail { + let val = + (Lsb0::mask(None, tail) & !elem.load_value()).value(); + let dead_bits = + ::BITS as usize - tail.value() as usize; + out -= val.leading_zeros() as usize - dead_bits; + if val != T::Mem::ZERO { + return Some(out); + } + } + + for elem in body.iter().rev() { + let val = !elem.load_value(); + out -= val.leading_zeros() as usize; + if val != T::Mem::ZERO { + return Some(out); + } + } + + if let Some((head, elem)) = head { + let val = + (Lsb0::mask(head, None) & !elem.load_value()).value(); + if val != T::Mem::ZERO { + out -= val.leading_zeros() as usize; + return Some(out); + } + } + + None + }, + } + } } /** Order-specialized function implementations. @@ -190,4 +388,197 @@ where T: BitStore .zip(other.chunks(chunk_size)) .all(|(a, b)| a.load_be::() == b.load_be::()) } + + /// Seeks the index of the first `1` bit in the bit-slice. + pub(crate) fn sp_iter_ones_first(&self) -> Option { + let mut accum = 0; + + match self.domain() { + Domain::Enclave { head, elem, tail } => { + let val = (Msb0::mask(head, tail) & elem.load_value()).value(); + accum += val.leading_zeros() as usize - head.value() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + None + }, + Domain::Region { head, body, tail } => { + if let Some((head, elem)) = head { + let val = + (Msb0::mask(head, None) & elem.load_value()).value(); + accum += + val.leading_zeros() as usize - head.value() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + } + + for elem in body { + let val = elem.load_value(); + accum += val.leading_zeros() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + } + + if let Some((elem, tail)) = tail { + let val = + (Msb0::mask(None, tail) & elem.load_value()).value(); + accum += val.leading_zeros() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + } + + None + }, + } + } + + /// Seeks the index of the last `1` bit in the bit-slice. + pub(crate) fn sp_iter_ones_last(&self) -> Option { + // Set the state tracker to the last live index in the bit-slice. 
+ let mut out = match self.len() { + 0 => return None, + n => n - 1, + }; + match self.domain() { + Domain::Enclave { head, elem, tail } => { + let val = (Msb0::mask(head, tail) & elem.load_value()).value(); + let dead_bits = ::BITS - tail.value(); + if val != T::Mem::ZERO { + out -= val.trailing_zeros() as usize - dead_bits as usize; + return Some(out); + } + None + }, + Domain::Region { head, body, tail } => { + if let Some((elem, tail)) = tail { + let val = + (Msb0::mask(None, tail) & elem.load_value()).value(); + let dead_bits = + ::BITS as usize - tail.value() as usize; + out -= val.trailing_zeros() as usize - dead_bits; + if val != T::Mem::ZERO { + return Some(out); + } + } + + for elem in body.iter().rev() { + let val = elem.load_value(); + out -= val.trailing_zeros() as usize; + if val != T::Mem::ZERO { + return Some(out); + } + } + + if let Some((head, elem)) = head { + let val = + (Msb0::mask(head, None) & elem.load_value()).value(); + if val != T::Mem::ZERO { + out -= val.trailing_zeros() as usize; + return Some(out); + } + } + + None + }, + } + } + + /// Seeks the index of the first `0` bit in the bit-slice. + pub(crate) fn sp_iter_zeros_first(&self) -> Option { + let mut accum = 0; + + match self.domain() { + Domain::Enclave { head, elem, tail } => { + let val = (Msb0::mask(head, tail) & !elem.load_value()).value(); + accum += val.leading_zeros() as usize - head.value() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + None + }, + Domain::Region { head, body, tail } => { + if let Some((head, elem)) = head { + let val = + (Msb0::mask(head, None) & !elem.load_value()).value(); + accum += + val.leading_zeros() as usize - head.value() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + } + + for elem in body { + let val = !elem.load_value(); + accum += val.leading_zeros() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + } + + if let Some((elem, tail)) = tail { + let val = + (Msb0::mask(None, tail) & !elem.load_value()).value(); + accum += val.leading_zeros() as usize; + if val != T::Mem::ZERO { + return Some(accum); + } + } + + None + }, + } + } + + /// Seeks the index of the last `0` bit in the bit-slice. 
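The `Msb0` counterparts mirror the logic above but count with `leading_zeros`, since bit 0 of an `Msb0` element is its most significant bit. The same simplified sketch, adjusted for that ordering:

```rust
/// Index of the first `1` bit, counting from the most significant bit of
/// each byte (Msb0-style traversal).
fn first_one_msb0(words: &[u8]) -> Option<usize> {
    let mut base = 0;
    for &word in words {
        if word != 0 {
            return Some(base + word.leading_zeros() as usize);
        }
        base += u8::BITS as usize;
    }
    None
}

fn main() {
    assert_eq!(first_one_msb0(&[0x00, 0x20]), Some(10));
}
```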
+ pub(crate) fn sp_iter_zeros_last(&self) -> Option { + let mut out = match self.len() { + 0 => return None, + n => n - 1, + }; + match self.domain() { + Domain::Enclave { head, elem, tail } => { + let val = (Msb0::mask(head, tail) & !elem.load_value()).value(); + let dead_bits = ::BITS - tail.value(); + if val != T::Mem::ZERO { + out -= val.trailing_zeros() as usize - dead_bits as usize; + return Some(out); + } + None + }, + Domain::Region { head, body, tail } => { + if let Some((elem, tail)) = tail { + let val = + (Msb0::mask(None, tail) & !elem.load_value()).value(); + let dead_bits = + ::BITS as usize - tail.value() as usize; + out -= val.trailing_zeros() as usize - dead_bits; + if val != T::Mem::ZERO { + return Some(out); + } + } + + for elem in body.iter().rev() { + let val = !elem.load_value(); + out -= val.trailing_zeros() as usize; + if val != T::Mem::ZERO { + return Some(out); + } + } + + if let Some((head, elem)) = head { + let val = + (Msb0::mask(head, None) & !elem.load_value()).value(); + if val != T::Mem::ZERO { + out -= val.trailing_zeros() as usize; + return Some(out); + } + } + + None + }, + } + } } diff --git a/src/slice/tests.rs b/src/slice/tests.rs index f08110bb..a0935fd7 100644 --- a/src/slice/tests.rs +++ b/src/slice/tests.rs @@ -420,7 +420,7 @@ fn unspecialized() { BitIdx, BitPos, }, - mem::BitRegister, + mem::{BitRegister, BitMemory}, prelude::*, }; @@ -429,7 +429,7 @@ fn unspecialized() { unsafe impl BitOrder for Swizzle { fn at(index: BitIdx) -> BitPos where R: BitRegister { - match R::BITS { + match ::BITS { 8 => BitPos::new(index.value() ^ 0b100).unwrap(), 16 => BitPos::new(index.value() ^ 0b1100).unwrap(), 32 => BitPos::new(index.value() ^ 0b11100).unwrap(), @@ -774,6 +774,93 @@ fn iter_ones_zeros() { assert!(zeros.nth_back(0).is_none()); } +#[test] +fn specialized_iter_ones() { + let data = [0x08u8, 0x20, 0, 0x04, 0x08]; + + let bits = data.view_bits::(); + assert!(bits[17 .. 23].sp_iter_ones_first().is_none()); + assert!(bits[17 .. 23].sp_iter_ones_last().is_none()); + assert!(bits[12 .. 28].sp_iter_ones_first().is_none()); + assert!(bits[12 .. 28].sp_iter_ones_last().is_none()); + + assert_eq!(bits[3 ..].sp_iter_ones_first(), Some(1)); + assert_eq!(bits[5 ..].sp_iter_ones_first(), Some(5)); + assert_eq!(bits[11 ..].sp_iter_ones_first(), Some(18)); + assert_eq!(bits[30 .. 38].sp_iter_ones_first(), Some(6)); + assert_eq!(bits[34 .. 38].sp_iter_ones_first(), Some(2)); + + assert_eq!(bits[.. 38].sp_iter_ones_last(), Some(36)); + assert_eq!(bits[.. 36].sp_iter_ones_last(), Some(29)); + assert_eq!(bits[.. 29].sp_iter_ones_last(), Some(10)); + assert_eq!(bits[2 .. 10].sp_iter_ones_last(), Some(2)); + assert_eq!(bits[2 .. 6].sp_iter_ones_last(), Some(2)); + + let bits = data.view_bits::(); + assert!(bits[17 .. 23].sp_iter_ones_first().is_none()); + assert!(bits[17 .. 23].sp_iter_ones_last().is_none()); + assert!(bits[14 .. 26].sp_iter_ones_first().is_none()); + assert!(bits[14 .. 26].sp_iter_ones_last().is_none()); + + assert_eq!(bits[2 ..].sp_iter_ones_first(), Some(1)); + assert_eq!(bits[4 ..].sp_iter_ones_first(), Some(9)); + assert_eq!(bits[14 ..].sp_iter_ones_first(), Some(12)); + assert_eq!(bits[27 .. 38].sp_iter_ones_first(), Some(8)); + assert_eq!(bits[34 .. 38].sp_iter_ones_first(), Some(1)); + + assert_eq!(bits[.. 38].sp_iter_ones_last(), Some(35)); + assert_eq!(bits[.. 35].sp_iter_ones_last(), Some(26)); + assert_eq!(bits[.. 26].sp_iter_ones_last(), Some(13)); + assert_eq!(bits[2 .. 13].sp_iter_ones_last(), Some(1)); + assert_eq!(bits[2 .. 
6].sp_iter_ones_last(), Some(1)); +} + +#[test] +fn specialized_iter_zeros() { + let data = [!0x08u8, !0x20, !0, !0x04, !0x08]; + + let bits = data.view_bits::(); + assert!(bits[17 .. 23].sp_iter_zeros_first().is_none()); + assert!(bits[17 .. 23].sp_iter_zeros_last().is_none()); + assert!(bits[12 .. 28].sp_iter_zeros_first().is_none()); + assert!(bits[12 .. 28].sp_iter_zeros_last().is_none()); + + assert_eq!( + bits[3 ..].sp_iter_zeros_first(), + Some(1), + "{:b}", + &bits[3 ..] + ); + assert_eq!(bits[5 ..].sp_iter_zeros_first(), Some(5)); + assert_eq!(bits[11 ..].sp_iter_zeros_first(), Some(18)); + assert_eq!(bits[30 .. 38].sp_iter_zeros_first(), Some(6)); + assert_eq!(bits[34 .. 38].sp_iter_zeros_first(), Some(2)); + + assert_eq!(bits[.. 38].sp_iter_zeros_last(), Some(36)); + assert_eq!(bits[.. 36].sp_iter_zeros_last(), Some(29)); + assert_eq!(bits[.. 29].sp_iter_zeros_last(), Some(10)); + assert_eq!(bits[2 .. 10].sp_iter_zeros_last(), Some(2)); + assert_eq!(bits[2 .. 6].sp_iter_zeros_last(), Some(2)); + + let bits = data.view_bits::(); + assert!(bits[17 .. 23].sp_iter_zeros_first().is_none()); + assert!(bits[17 .. 23].sp_iter_zeros_last().is_none()); + assert!(bits[14 .. 26].sp_iter_zeros_first().is_none()); + assert!(bits[14 .. 26].sp_iter_zeros_last().is_none()); + + assert_eq!(bits[2 ..].sp_iter_zeros_first(), Some(1)); + assert_eq!(bits[4 ..].sp_iter_zeros_first(), Some(9)); + assert_eq!(bits[14 ..].sp_iter_zeros_first(), Some(12)); + assert_eq!(bits[27 .. 38].sp_iter_zeros_first(), Some(8)); + assert_eq!(bits[34 .. 38].sp_iter_zeros_first(), Some(1)); + + assert_eq!(bits[.. 38].sp_iter_zeros_last(), Some(35)); + assert_eq!(bits[.. 35].sp_iter_zeros_last(), Some(26)); + assert_eq!(bits[.. 26].sp_iter_zeros_last(), Some(13)); + assert_eq!(bits[2 .. 13].sp_iter_zeros_last(), Some(1)); + assert_eq!(bits[2 .. 6].sp_iter_zeros_last(), Some(1)); +} + #[cfg(feature = "alloc")] mod format { use crate::prelude::*; diff --git a/src/vec.rs b/src/vec.rs index 81567deb..ffe69263 100644 --- a/src/vec.rs +++ b/src/vec.rs @@ -426,7 +426,7 @@ where let capacity = vec.capacity(); BitPtr::from_mut_slice(vec.as_mut_slice()) - .span(vec.len() * T::Mem::BITS as usize) + .span(vec.len() * ::BITS as usize) .map(|bitspan| Self { bitspan, capacity }) .map_err(|_| ManuallyDrop::into_inner(vec)) } @@ -1010,13 +1010,14 @@ where let bitspan = self.bitspan; let head = bitspan.head(); let elts = bitspan.elements(); - let tail = head.value() as usize + new_len; - if let Some(extra) = tail.pipe(crate::mem::elts::).checked_sub(elts) { - self.with_vec(|vec| func(&mut **vec, extra)); - let capa = self.capacity(); - // Zero the newly-reserved buffer. - unsafe { self.get_unchecked_mut(len .. capa) }.set_all(false); - } + let new_elts = crate::mem::elts::(head.value() as usize + new_len); + let extra = new_elts - elts; + self.with_vec(|vec| { + func(&mut **vec, extra); + // Initialize any newly-allocated elements to zero, without + // initializing leftover dead capacity. + vec.resize_with(new_elts, || unsafe { mem::zeroed() }); + }); } /// Permits manipulation of the underlying vector allocation. diff --git a/src/vec/api.rs b/src/vec/api.rs index 6e79d6e3..8239ee9a 100644 --- a/src/vec/api.rs +++ b/src/vec/api.rs @@ -283,7 +283,7 @@ where #[inline] pub fn capacity(&self) -> usize { self.capacity - .checked_mul(T::Mem::BITS as usize) + .checked_mul(::BITS as usize) .expect("Bit-Vector capacity exceeded") // Don’t forget to subtract any dead bits in the front of the base! 
// This has to be saturating, because a non-zero head on a zero diff --git a/src/view.rs b/src/view.rs index cfcfd0b7..6afcae71 100644 --- a/src/view.rs +++ b/src/view.rs @@ -203,7 +203,7 @@ macro_rules! view_bits { where O: BitOrder { unsafe { from_raw_parts_unchecked( BitPtr::from_slice(&self[..]), - $n * T::Mem::BITS as usize, + $n * <T::Mem as BitMemory>::BITS as usize, ) } } @@ -212,7 +212,7 @@ macro_rules! view_bits { where O: BitOrder { unsafe { from_raw_parts_unchecked_mut( BitPtr::from_mut_slice(&mut self[..]), - $n * T::Mem::BITS as usize, + $n * <T::Mem as BitMemory>::BITS as usize, ) } } diff --git a/tests/foreign_order.rs b/tests/foreign_order.rs index f84abd34..4cc5024a 100644 --- a/tests/foreign_order.rs +++ b/tests/foreign_order.rs @@ -12,7 +12,7 @@ use bitvec::{ BitIdx, BitPos, }, - mem::BitRegister, + mem::{BitRegister, BitMemory}, prelude::*, }; @@ -21,7 +21,7 @@ pub struct Swizzle; unsafe impl BitOrder for Swizzle { fn at<R>(index: BitIdx<R>) -> BitPos<R> where R: BitRegister { - match R::BITS { + match <R as BitMemory>::BITS { 8 => BitPos::new(index.value() ^ 0b100).unwrap(), 16 => BitPos::new(index.value() ^ 0b1100).unwrap(), 32 => BitPos::new(index.value() ^ 0b11100).unwrap(),
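The `Swizzle` ordering in `tests/foreign_order.rs` maps an index by XOR-ing it with a constant; XOR with a fixed value below the width is its own inverse, so it permutes `0 .. BITS` exactly as the ordering verifier in `src/order.rs` walks through. A quick property check for the 8-bit arm:

```rust
fn main() {
    let mut seen = [false; 8];
    for index in 0u8..8 {
        let pos = (index ^ 0b100) as usize;
        // Every index lands on a distinct in-range position, so the
        // mapping is a bijection over the 8 bit positions.
        assert!(pos < 8);
        assert!(!seen[pos]);
        seen[pos] = true;
    }
}
```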
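The `resize` rework in `src/vec.rs` above sizes the allocation with `crate::mem::elts`, a ceiling division of the live bits (including the head offset) by the element width. A worked sketch of that arithmetic (local helper, not the crate function):

```rust
/// Elements needed to hold `bits` bits in elements of `width` bits each.
fn elts(bits: usize, width: usize) -> usize {
    (bits + width - 1) / width
}

fn main() {
    // A vector whose live region begins 3 bits into its first `u16` and is
    // resized to 30 bits needs ceil((3 + 30) / 16) = 3 elements, so the
    // head offset participates in the element count.
    assert_eq!(elts(3 + 30, 16), 3);
    assert_eq!(elts(32, 16), 2);
}
```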