@@ -1634,17 +1634,13 @@ impl<T, A: AllocRef + Clone> IntoIterator for RawTable<T, A> {
 /// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
 /// not track an item count.
 pub(crate) struct RawIterRange<T> {
-    inner: RawIterRangeInner,
-    // Pointer to the buckets for the current group.
-    data: Bucket<T>,
-}
-
-#[derive(Clone)]
-pub(crate) struct RawIterRangeInner {
     // Mask of full buckets in the current group. Bits are cleared from this
     // mask as each element is processed.
     current_group: BitMask,
 
+    // Pointer to the buckets for the current group.
+    data: Bucket<T>,
+
     // Pointer to the next group of control bytes,
     // Must be aligned to the group size.
     next_ctrl: *const u8,
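
(For orientation: after this hunk the separate `RawIterRangeInner` struct is gone and its fields live directly on `RawIterRange<T>`. A sketch of the flattened struct, assembled from the context and added lines above; the `end` field is not visible in this hunk but is used by `split` and `next` further down, and its comment here is a paraphrase rather than a quote from the source.)

pub(crate) struct RawIterRange<T> {
    // Mask of full buckets in the current group. Bits are cleared from this
    // mask as each element is processed.
    current_group: BitMask,

    // Pointer to the buckets for the current group.
    data: Bucket<T>,

    // Pointer to the next group of control bytes,
    // Must be aligned to the group size.
    next_ctrl: *const u8,

    // End of the range of control bytes (used by `split` and `next` below;
    // comment paraphrased, not quoted from the source).
    end: *const u8,
}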
@@ -1659,9 +1655,19 @@ impl<T> RawIterRange<T> {
     /// The control byte address must be aligned to the group size.
     #[cfg_attr(feature = "inline-more", inline)]
     unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
+        debug_assert_ne!(len, 0);
+        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
+        let end = ctrl.add(len);
+
+        // Load the first group and advance ctrl to point to the next group
+        let current_group = Group::load_aligned(ctrl).match_full();
+        let next_ctrl = ctrl.add(Group::WIDTH);
+
         Self {
-            inner: RawIterRangeInner::new(ctrl, len),
+            current_group,
             data,
+            next_ctrl,
+            end,
         }
     }
 
@@ -1673,15 +1679,15 @@ impl<T> RawIterRange<T> {
     #[cfg(feature = "rayon")]
     pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
         unsafe {
-            if self.inner.end <= self.inner.next_ctrl {
+            if self.end <= self.next_ctrl {
                 // Nothing to split if the group that we are current processing
                 // is the last one.
                 (self, None)
             } else {
                 // len is the remaining number of elements after the group that
                 // we are currently processing. It must be a multiple of the
                 // group size (small tables are caught by the check above).
-                let len = offset_from(self.inner.end, self.inner.next_ctrl);
+                let len = offset_from(self.end, self.next_ctrl);
                 debug_assert_eq!(len % Group::WIDTH, 0);
 
                 // Split the remaining elements into two halves, but round the
@@ -1693,65 +1699,23 @@ impl<T> RawIterRange<T> {
                 let mid = (len / 2) & !(Group::WIDTH - 1);
 
                 let tail = Self::new(
-                    self.inner.next_ctrl.add(mid),
+                    self.next_ctrl.add(mid),
                     self.data.next_n(Group::WIDTH).next_n(mid),
                     len - mid,
                 );
                 debug_assert_eq!(
                     self.data.next_n(Group::WIDTH).next_n(mid).ptr,
                     tail.data.ptr
                 );
-                debug_assert_eq!(self.inner.end, tail.inner.end);
-                self.inner.end = self.inner.next_ctrl.add(mid);
-                debug_assert_eq!(self.inner.end.add(Group::WIDTH), tail.inner.next_ctrl);
+                debug_assert_eq!(self.end, tail.end);
+                self.end = self.next_ctrl.add(mid);
+                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
                 (self, Some(tail))
             }
         }
     }
 }
 
-impl RawIterRangeInner {
-    /// Returns a `RawIterRange` covering a subset of a table.
-    ///
-    /// The control byte address must be aligned to the group size.
-    #[inline]
-    unsafe fn new(ctrl: *const u8, len: usize) -> Self {
-        debug_assert_ne!(len, 0);
-        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
-        let end = ctrl.add(len);
-
-        // Load the first group and advance ctrl to point to the next group
-        let current_group = Group::load_aligned(ctrl).match_full();
-        let next_ctrl = ctrl.add(Group::WIDTH);
-
-        Self {
-            current_group,
-            next_ctrl,
-            end,
-        }
-    }
-
-    #[inline]
-    unsafe fn next_group(&mut self) -> Option<()> {
-        if self.next_ctrl >= self.end {
-            None
-        } else {
-            self.current_group = Group::load_aligned(self.next_ctrl).match_full();
-            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
-            Some(())
-        }
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        // We don't have an item count, so just guess based on the range size.
-        (
-            0,
-            Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
-        )
-    }
-}
-
 // We make raw iterators unconditionally Send and Sync, and let the PhantomData
 // in the actual iterator implementations determine the real Send/Sync bounds.
 unsafe impl<T> Send for RawIterRange<T> {}
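
(The `(len / 2) & !(Group::WIDTH - 1)` rounding in `split` above keeps `mid` a multiple of the group width, so both halves stay group-aligned. A standalone sketch of that arithmetic, assuming a group width of 16 as on SSE2 targets; the constant and the sample lengths are illustrative only and not taken from the diff.)

// Standalone illustration of the `mid` rounding used by `split` above.
// WIDTH = 16 mirrors the SSE2 group size; this value is an assumption for
// the example only.
fn main() {
    const WIDTH: usize = 16;
    for len in [16usize, 32, 48, 80, 128] {
        // Round half the remaining length down to a multiple of the group width.
        let mid = (len / 2) & !(WIDTH - 1);
        assert_eq!(mid % WIDTH, 0);
        // `mid` control bytes stay with `self`, the remaining `len - mid` go
        // to the `tail` iterator.
        println!("len = {len:3} -> mid = {mid:3}, tail len = {}", len - mid);
    }
}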
@@ -1761,8 +1725,10 @@ impl<T> Clone for RawIterRange<T> {
     #[cfg_attr(feature = "inline-more", inline)]
     fn clone(&self) -> Self {
         Self {
-            inner: self.inner.clone(),
             data: self.data.clone(),
+            next_ctrl: self.next_ctrl,
+            current_group: self.current_group,
+            end: self.end,
         }
     }
 }
@@ -1774,24 +1740,34 @@ impl<T> Iterator for RawIterRange<T> {
     fn next(&mut self) -> Option<Bucket<T>> {
         unsafe {
             loop {
-                if let Some(index) = self.inner.current_group.take_next_bit() {
+                if let Some(index) = self.current_group.lowest_set_bit() {
+                    self.current_group = self.current_group.remove_lowest_bit();
                     return Some(self.data.next_n(index));
                 }
 
+                if self.next_ctrl >= self.end {
+                    return None;
+                }
+
                 // We might read past self.end up to the next group boundary,
                 // but this is fine because it only occurs on tables smaller
                 // than the group size where the trailing control bytes are all
                 // EMPTY. On larger tables self.end is guaranteed to be aligned
                 // to the group size (since tables are power-of-two sized).
-                self.inner.next_group()?;
+                self.current_group = Group::load_aligned(self.next_ctrl).match_full();
                 self.data = self.data.next_n(Group::WIDTH);
+                self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
             }
         }
     }
 
     #[cfg_attr(feature = "inline-more", inline)]
     fn size_hint(&self) -> (usize, Option<usize>) {
-        self.inner.size_hint()
+        // We don't have an item count, so just guess based on the range size.
+        (
+            0,
+            Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
+        )
     }
 }
 
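
(The inlined `next` above replaces the old `take_next_bit` helper with an explicit `lowest_set_bit` / `remove_lowest_bit` pair on the current group's mask. The same pattern, demonstrated on a plain `u16` standing in for the crate's `BitMask`; this is an illustrative sketch, not the crate's implementation.)

// Demonstration of the lowest_set_bit / remove_lowest_bit iteration pattern
// on a plain u16, standing in for hashbrown's BitMask. Assumes a 16-wide
// group whose full buckets are marked by set bits.
fn main() {
    // Bits 1, 4 and 9 set: buckets 1, 4 and 9 of this group are full.
    let mut current_group: u16 = 0b0000_0010_0001_0010;
    let mut yielded = Vec::new();

    // Mirrors the loop body of `next()`: take the lowest set bit as the
    // in-group index, then clear it from the mask.
    while current_group != 0 {
        let index = current_group.trailing_zeros() as usize; // lowest_set_bit
        current_group &= current_group - 1; // remove_lowest_bit
        yielded.push(index);
    }

    assert_eq!(yielded, [1, 4, 9]);
    println!("full buckets in this group: {yielded:?}");
}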