@@ -1658,19 +1658,27 @@ impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
 /// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
 /// not track an item count.
 pub(crate) struct RawIterRange<T> {
+    // Pointer to the buckets for the current group.
+    data: Bucket<T>,
+
+    inner: RawIterRangeInner,
+}
+
+#[derive(Clone, Copy)]
+pub(crate) struct RawIterRangeInner {
     // Mask of full buckets in the current group. Bits are cleared from this
     // mask as each element is processed.
     current_group: BitMask,

-    // Pointer to the buckets for the current group.
-    data: Bucket<T>,
-
     // Pointer to the next group of control bytes,
     // Must be aligned to the group size.
     next_ctrl: *const u8,

     // Pointer one past the last control byte of this range.
     end: *const u8,
+
+    // Index to the buckets for the current group.
+    index: usize,
 }

 impl<T> RawIterRange<T> {
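The hunk above splits the range iterator in two: a non-generic `RawIterRangeInner` that owns the control-byte scanning state and yields bucket indices, and a thin `RawIterRange<T>` that keeps the `Bucket<T>` pointer and maps each index to an element. Below is a minimal sketch of that shape, using hypothetical stand-ins (`FullSlots`, `FullSlotIter`, a plain `u64` mask and a slice) rather than hashbrown's `BitMask`, `Group`, and `Bucket`:

```rust
/// Inner iterator: no `T` anywhere, so it can be small, `Copy`, and shared
/// across element types. It only produces slot indices.
#[derive(Clone, Copy)]
struct FullSlots {
    mask: u64, // one bit per slot; a set bit marks a full slot
}

impl Iterator for FullSlots {
    type Item = usize;

    fn next(&mut self) -> Option<usize> {
        if self.mask == 0 {
            return None;
        }
        let index = self.mask.trailing_zeros() as usize;
        self.mask &= self.mask - 1; // clear the lowest set bit
        Some(index)
    }
}

/// Outer iterator: only the index-to-element mapping is generic over `T`.
struct FullSlotIter<'a, T> {
    data: &'a [T],
    inner: FullSlots,
}

impl<'a, T> Iterator for FullSlotIter<'a, T> {
    type Item = &'a T;

    fn next(&mut self) -> Option<&'a T> {
        // Map the slot index produced by the inner iterator to an element.
        self.inner.next().map(|index| &self.data[index])
    }
}

fn main() {
    let data = ["a", "b", "c", "d"];
    let iter = FullSlotIter { data: &data[..], inner: FullSlots { mask: 0b1010 } };
    assert_eq!(iter.collect::<Vec<_>>(), [&"b", &"d"]);
}
```

Keeping the element type out of the inner iterator is presumably what lets it derive `Clone, Copy` and keeps the scanning logic independent of `T`.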
@@ -1679,19 +1687,9 @@ impl<T> RawIterRange<T> {
     /// The control byte address must be aligned to the group size.
     #[cfg_attr(feature = "inline-more", inline)]
     unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
-        debug_assert_ne!(len, 0);
-        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
-        let end = ctrl.add(len);
-
-        // Load the first group and advance ctrl to point to the next group
-        let current_group = Group::load_aligned(ctrl).match_full();
-        let next_ctrl = ctrl.add(Group::WIDTH);
-
         Self {
-            current_group,
             data,
-            next_ctrl,
-            end,
+            inner: RawIterRangeInner::new(ctrl, len),
         }
     }

@@ -1703,15 +1701,15 @@ impl<T> RawIterRange<T> {
     #[cfg(feature = "rayon")]
     pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
         unsafe {
-            if self.end <= self.next_ctrl {
+            if self.inner.end <= self.inner.next_ctrl {
                 // Nothing to split if the group that we are currently processing
                 // is the last one.
                 (self, None)
             } else {
                 // len is the remaining number of elements after the group that
                 // we are currently processing. It must be a multiple of the
                 // group size (small tables are caught by the check above).
-                let len = offset_from(self.end, self.next_ctrl);
+                let len = offset_from(self.inner.end, self.inner.next_ctrl);
                 debug_assert_eq!(len % Group::WIDTH, 0);

                 // Split the remaining elements into two halves, but round the
@@ -1723,23 +1721,46 @@ impl<T> RawIterRange<T> {
                 let mid = (len / 2) & !(Group::WIDTH - 1);

                 let tail = Self::new(
-                    self.next_ctrl.add(mid),
+                    self.inner.next_ctrl.add(mid),
                     self.data.next_n(Group::WIDTH).next_n(mid),
                     len - mid,
                 );
                 debug_assert_eq!(
                     self.data.next_n(Group::WIDTH).next_n(mid).ptr,
                     tail.data.ptr
                 );
-                debug_assert_eq!(self.end, tail.end);
-                self.end = self.next_ctrl.add(mid);
-                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
+                debug_assert_eq!(self.inner.end, tail.inner.end);
+                self.inner.end = self.inner.next_ctrl.add(mid);
+                debug_assert_eq!(self.inner.end.add(Group::WIDTH), tail.inner.next_ctrl);
                 (self, Some(tail))
             }
         }
     }
 }

+impl RawIterRangeInner {
+    /// Returns a `RawIterRangeInner` covering a subset of a table.
+    ///
+    /// The control byte address must be aligned to the group size.
+    #[cfg_attr(feature = "inline-more", inline)]
+    unsafe fn new(ctrl: *const u8, len: usize) -> Self {
+        debug_assert_ne!(len, 0);
+        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
+        let end = ctrl.add(len);
+
+        // Load the first group and advance ctrl to point to the next group
+        let current_group = Group::load_aligned(ctrl).match_full();
+        let next_ctrl = ctrl.add(Group::WIDTH);
+
+        Self {
+            current_group,
+            next_ctrl,
+            end,
+            index: 0,
+        }
+    }
+}
+
 // We make raw iterators unconditionally Send and Sync, and let the PhantomData
 // in the actual iterator implementations determine the real Send/Sync bounds.
 unsafe impl<T> Send for RawIterRange<T> {}
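In `split`, the remaining control bytes are divided at `mid = (len / 2) & !(Group::WIDTH - 1)`, i.e. the midpoint rounded down to a whole number of groups; the mask trick is only valid because the group width is a power of two. Below is a standalone sketch of that arithmetic, with `WIDTH = 16` assumed as a stand-in for `Group::WIDTH` (the real width depends on the SIMD backend):

```rust
fn main() {
    const WIDTH: usize = 16; // assumed stand-in for Group::WIDTH
    assert!(WIDTH.is_power_of_two());

    for len in (WIDTH..=8 * WIDTH).step_by(WIDTH) {
        // Midpoint rounded down to a whole number of groups.
        let mid = (len / 2) & !(WIDTH - 1);
        assert_eq!(mid % WIDTH, 0); // the split point stays group-aligned
        assert!(mid <= len / 2); // never overshoots the midpoint
    }

    // Example: len = 48 (three groups) splits into mid = 16 and a tail of 32.
    assert_eq!((48 / 2) & !(WIDTH - 1), 16);
}
```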
@@ -1750,9 +1771,7 @@ impl<T> Clone for RawIterRange<T> {
     fn clone(&self) -> Self {
         Self {
             data: self.data.clone(),
-            next_ctrl: self.next_ctrl,
-            current_group: self.current_group,
-            end: self.end,
+            inner: self.inner.clone(),
         }
     }
 }
@@ -1762,11 +1781,32 @@ impl<T> Iterator for RawIterRange<T> {

     #[cfg_attr(feature = "inline-more", inline)]
     fn next(&mut self) -> Option<Bucket<T>> {
+        unsafe {
+            match self.inner.next() {
+                Some(index) => Some(self.data.next_n(index)),
+                None => None,
+            }
+        }
+    }
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+impl<T> FusedIterator for RawIterRange<T> {}
+
+impl Iterator for RawIterRangeInner {
+    type Item = usize;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<Self::Item> {
         unsafe {
             loop {
-                if let Some(index) = self.current_group.lowest_set_bit() {
+                if let Some(group_index) = self.current_group.lowest_set_bit() {
                     self.current_group = self.current_group.remove_lowest_bit();
-                    return Some(self.data.next_n(index));
+                    return Some(self.index + group_index);
                 }

                 if self.next_ctrl >= self.end {
@@ -1779,7 +1819,7 @@ impl<T> Iterator for RawIterRange<T> {
                 // EMPTY. On larger tables self.end is guaranteed to be aligned
                 // to the group size (since tables are power-of-two sized).
                 self.current_group = Group::load_aligned(self.next_ctrl).match_full();
-                self.data = self.data.next_n(Group::WIDTH);
+                self.index += Group::WIDTH;
                 self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
             }
         }
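The rewritten `next` above scans one group of control bytes at a time: the bits of `current_group` give offsets within the group, while `index` carries the group's base, so each yielded value is `self.index + group_index` and the base must grow by `Group::WIDTH` whenever the iterator advances to the next group. Below is a simplified sketch of the same scan over a plain byte slice, with an assumed toy encoding (0 = empty, anything else = full) instead of hashbrown's real control bytes and SIMD group loads:

```rust
const WIDTH: usize = 4; // stand-in for Group::WIDTH in this sketch

/// Yields the indices of all full slots, scanning one group of control
/// bytes at a time (0 = empty, non-zero = full in this toy encoding).
fn full_indices(ctrl: &[u8]) -> Vec<usize> {
    assert_eq!(ctrl.len() % WIDTH, 0, "the range must cover whole groups");
    let mut out = Vec::new();
    let mut index = 0; // index of the first bucket in the current group
    for group in ctrl.chunks_exact(WIDTH) {
        for (group_index, &byte) in group.iter().enumerate() {
            if byte != 0 {
                // Bucket index = running group base + offset within the group,
                // mirroring `self.index + group_index` in the diff above.
                out.push(index + group_index);
            }
        }
        index += WIDTH; // advance the base as the scan moves to the next group
    }
    out
}

fn main() {
    let ctrl: [u8; 12] = [1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1];
    assert_eq!(full_indices(&ctrl), [0, 3, 6, 11]);
}
```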
@@ -1795,8 +1835,6 @@ impl<T> Iterator for RawIterRange<T> {
     }
 }

-impl<T> FusedIterator for RawIterRange<T> {}
-
 /// Iterator which returns a raw pointer to every full bucket in the table.
 ///
 /// For maximum flexibility this iterator is not bound by a lifetime, but you