Commit 50ae8ee

Markus Westerlind committed:

    refactor

1 parent: ca16ad6

2 files changed, +36 -70 lines changed

src/raw/bitmask.rs

Lines changed: 0 additions & 10 deletions
@@ -61,16 +61,6 @@ impl BitMask {
         }
     }
 
-    #[inline]
-    pub fn take_next_bit(&mut self) -> Option<usize> {
-        if let Some(index) = self.lowest_set_bit() {
-            *self = self.remove_lowest_bit();
-            Some(index)
-        } else {
-            None
-        }
-    }
-
     /// Returns the first set bit in the `BitMask`, if there is one. The
     /// bitmask must not be empty.
     #[inline]
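
With `take_next_bit` removed, callers now inline the two remaining primitives themselves: `lowest_set_bit` finds the next full bucket and `remove_lowest_bit` clears it (see the `Iterator` impl in src/raw/mod.rs below). A minimal, self-contained sketch of that drain pattern on a plain u16 mask, using hypothetical names, purely to illustrate the equivalence:

// Stand-in for BitMask: a u16 in which each set bit marks a full bucket.
fn drain_bits(mut mask: u16) -> Vec<usize> {
    let mut indices = Vec::new();
    // The removed take_next_bit combined these two steps into one call.
    while mask != 0 {
        let index = mask.trailing_zeros() as usize; // lowest_set_bit()
        mask &= mask - 1;                           // remove_lowest_bit()
        indices.push(index);
    }
    indices
}

fn main() {
    // 0b1010_0100 has bits 2, 5 and 7 set.
    assert_eq!(drain_bits(0b1010_0100), vec![2, 5, 7]);
}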

src/raw/mod.rs

Lines changed: 36 additions & 60 deletions
@@ -1634,17 +1634,13 @@ impl<T, A: AllocRef + Clone> IntoIterator for RawTable<T, A> {
 /// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
 /// not track an item count.
 pub(crate) struct RawIterRange<T> {
-    inner: RawIterRangeInner,
-    // Pointer to the buckets for the current group.
-    data: Bucket<T>,
-}
-
-#[derive(Clone)]
-pub(crate) struct RawIterRangeInner {
     // Mask of full buckets in the current group. Bits are cleared from this
     // mask as each element is processed.
     current_group: BitMask,
 
+    // Pointer to the buckets for the current group.
+    data: Bucket<T>,
+
     // Pointer to the next group of control bytes,
     // Must be aligned to the group size.
     next_ctrl: *const u8,
@@ -1659,9 +1655,19 @@ impl<T> RawIterRange<T> {
     /// The control byte address must be aligned to the group size.
     #[cfg_attr(feature = "inline-more", inline)]
     unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
+        debug_assert_ne!(len, 0);
+        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
+        let end = ctrl.add(len);
+
+        // Load the first group and advance ctrl to point to the next group
+        let current_group = Group::load_aligned(ctrl).match_full();
+        let next_ctrl = ctrl.add(Group::WIDTH);
+
         Self {
-            inner: RawIterRangeInner::new(ctrl, len),
+            current_group,
             data,
+            next_ctrl,
+            end,
         }
     }
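
The constructor now does the bookkeeping that previously lived in RawIterRangeInner::new: record the end of the range, load the first group eagerly, and point next_ctrl at the group after it. A simplified, hypothetical model of that bookkeeping with indices in place of raw pointers (GROUP_WIDTH stands in for Group::WIDTH, 16 with the SSE2 backend):

const GROUP_WIDTH: usize = 16;

// Returns (next_ctrl, end) as offsets into the control-byte array.
fn new_range(ctrl: usize, len: usize) -> (usize, usize) {
    assert_ne!(len, 0);
    assert_eq!(ctrl % GROUP_WIDTH, 0); // control bytes are group-aligned
    let end = ctrl + len;               // one past the last control byte
    let next_ctrl = ctrl + GROUP_WIDTH; // the group after the one just loaded
    (next_ctrl, end)
}

fn main() {
    // A 48-byte range of control bytes starting at offset 0: the first group
    // (bytes 0..16) is consumed up front, so next_ctrl starts at 16.
    assert_eq!(new_range(0, 48), (16, 48));
}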

@@ -1673,15 +1679,15 @@ impl<T> RawIterRange<T> {
     #[cfg(feature = "rayon")]
     pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
         unsafe {
-            if self.inner.end <= self.inner.next_ctrl {
+            if self.end <= self.next_ctrl {
                 // Nothing to split if the group that we are currently processing
                 // is the last one.
                 (self, None)
             } else {
                 // len is the remaining number of elements after the group that
                 // we are currently processing. It must be a multiple of the
                 // group size (small tables are caught by the check above).
-                let len = offset_from(self.inner.end, self.inner.next_ctrl);
+                let len = offset_from(self.end, self.next_ctrl);
                 debug_assert_eq!(len % Group::WIDTH, 0);
 
// Split the remaining elements into two halves, but round the
@@ -1693,65 +1699,23 @@ impl<T> RawIterRange<T> {
                 let mid = (len / 2) & !(Group::WIDTH - 1);
 
                 let tail = Self::new(
-                    self.inner.next_ctrl.add(mid),
+                    self.next_ctrl.add(mid),
                     self.data.next_n(Group::WIDTH).next_n(mid),
                     len - mid,
                 );
                 debug_assert_eq!(
                     self.data.next_n(Group::WIDTH).next_n(mid).ptr,
                     tail.data.ptr
                 );
-                debug_assert_eq!(self.inner.end, tail.inner.end);
-                self.inner.end = self.inner.next_ctrl.add(mid);
-                debug_assert_eq!(self.inner.end.add(Group::WIDTH), tail.inner.next_ctrl);
+                debug_assert_eq!(self.end, tail.end);
+                self.end = self.next_ctrl.add(mid);
+                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
                 (self, Some(tail))
             }
         }
     }
 }
 
-impl RawIterRangeInner {
-    /// Returns a `RawIterRange` covering a subset of a table.
-    ///
-    /// The control byte address must be aligned to the group size.
-    #[inline]
-    unsafe fn new(ctrl: *const u8, len: usize) -> Self {
-        debug_assert_ne!(len, 0);
-        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
-        let end = ctrl.add(len);
-
-        // Load the first group and advance ctrl to point to the next group
-        let current_group = Group::load_aligned(ctrl).match_full();
-        let next_ctrl = ctrl.add(Group::WIDTH);
-
-        Self {
-            current_group,
-            next_ctrl,
-            end,
-        }
-    }
-
-    #[inline]
-    unsafe fn next_group(&mut self) -> Option<()> {
-        if self.next_ctrl >= self.end {
-            None
-        } else {
-            self.current_group = Group::load_aligned(self.next_ctrl).match_full();
-            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
-            Some(())
-        }
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        // We don't have an item count, so just guess based on the range size.
-        (
-            0,
-            Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
-        )
-    }
-}
-
 // We make raw iterators unconditionally Send and Sync, and let the PhantomData
 // in the actual iterator implementations determine the real Send/Sync bounds.
 unsafe impl<T> Send for RawIterRange<T> {}
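
In `split`, the midpoint `(len / 2) & !(Group::WIDTH - 1)` rounds half of the remaining length down to a multiple of the group width, so both halves stay aligned to group boundaries. A small standalone check of that arithmetic (GROUP_WIDTH stands in for Group::WIDTH, 16 with the SSE2 backend):

const GROUP_WIDTH: usize = 16;

fn split_mid(len: usize) -> usize {
    // Half of the remaining length, rounded down to a multiple of GROUP_WIDTH.
    (len / 2) & !(GROUP_WIDTH - 1)
}

fn main() {
    assert_eq!(split_mid(96), 48);  // 48 is already a multiple of 16
    assert_eq!(split_mid(112), 48); // 56 rounds down to 48
    assert_eq!(split_mid(32), 16);  // the smallest range that still splits
    // Both mid and len - mid therefore remain multiples of GROUP_WIDTH.
}
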
@@ -1761,8 +1725,10 @@ impl<T> Clone for RawIterRange<T> {
     #[cfg_attr(feature = "inline-more", inline)]
     fn clone(&self) -> Self {
         Self {
-            inner: self.inner.clone(),
             data: self.data.clone(),
+            next_ctrl: self.next_ctrl,
+            current_group: self.current_group,
+            end: self.end,
         }
     }
 }
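
The Clone impl stays hand-written; one likely reason (not stated in the commit) is that #[derive(Clone)] on a generic struct adds a `T: Clone` bound even when, as here, only pointers and a bitmask are copied. A generic illustration of that behaviour with a hypothetical pointer-holding type:

use std::marker::PhantomData;

// Hypothetical pointer-holding type, standing in for a struct like RawIterRange<T>.
struct RawRef<T> {
    ptr: *const T,
    _marker: PhantomData<T>,
}

// A derived Clone would require `T: Clone`; the manual impl copies the raw
// pointer without placing any bound on `T`.
impl<T> Clone for RawRef<T> {
    fn clone(&self) -> Self {
        RawRef {
            ptr: self.ptr,
            _marker: PhantomData,
        }
    }
}

fn main() {
    struct NotClone; // deliberately not Clone
    let r = RawRef::<NotClone> {
        ptr: std::ptr::null(),
        _marker: PhantomData,
    };
    let _copy = r.clone();
}
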
@@ -1774,24 +1740,34 @@ impl<T> Iterator for RawIterRange<T> {
     fn next(&mut self) -> Option<Bucket<T>> {
         unsafe {
             loop {
-                if let Some(index) = self.inner.current_group.take_next_bit() {
+                if let Some(index) = self.current_group.lowest_set_bit() {
+                    self.current_group = self.current_group.remove_lowest_bit();
                     return Some(self.data.next_n(index));
                 }
 
+                if self.next_ctrl >= self.end {
+                    return None;
+                }
+
                 // We might read past self.end up to the next group boundary,
                 // but this is fine because it only occurs on tables smaller
                 // than the group size where the trailing control bytes are all
                 // EMPTY. On larger tables self.end is guaranteed to be aligned
                 // to the group size (since tables are power-of-two sized).
-                self.inner.next_group()?;
+                self.current_group = Group::load_aligned(self.next_ctrl).match_full();
                 self.data = self.data.next_n(Group::WIDTH);
+                self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
             }
         }
     }
 
     #[cfg_attr(feature = "inline-more", inline)]
     fn size_hint(&self) -> (usize, Option<usize>) {
-        self.inner.size_hint()
+        // We don't have an item count, so just guess based on the range size.
+        (
+            0,
+            Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
+        )
     }
 }
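
The patched `next` now walks the table group by group: drain the current bitmask, stop once next_ctrl reaches end, otherwise load the next group and advance both the data pointer and next_ctrl. A self-contained, simplified model of that walk, using a byte slice and a u16 mask in place of the crate's Group and BitMask types (names and constants here are illustrative, not hashbrown's API):

const GROUP_WIDTH: usize = 16;
const EMPTY: u8 = 0xff; // empty control bytes have the high bit set

// Bitmask of full buckets in one group: bit i is set when control byte i has
// its high bit clear (mirrors what Group::match_full computes).
fn match_full(group: &[u8]) -> u16 {
    let mut mask = 0u16;
    for (i, &byte) in group.iter().enumerate() {
        if byte & 0x80 == 0 {
            mask |= 1 << i;
        }
    }
    mask
}

// Yields the indices of all full buckets, one group at a time, following the
// same control flow as the patched RawIterRange::next.
fn full_buckets(ctrl: &[u8]) -> Vec<usize> {
    assert_eq!(ctrl.len() % GROUP_WIDTH, 0);
    let mut result = Vec::new();
    let mut base = 0;                // plays the role of `data`
    let mut next_ctrl = GROUP_WIDTH; // plays the role of `next_ctrl`
    let mut current_group = match_full(&ctrl[..GROUP_WIDTH]);
    loop {
        // Drain the current group's mask: lowest_set_bit + remove_lowest_bit.
        if current_group != 0 {
            let index = current_group.trailing_zeros() as usize;
            current_group &= current_group - 1;
            result.push(base + index);
            continue;
        }
        // No bits left: stop at the end, otherwise load the next group.
        if next_ctrl >= ctrl.len() {
            return result;
        }
        current_group = match_full(&ctrl[next_ctrl..next_ctrl + GROUP_WIDTH]);
        base += GROUP_WIDTH;
        next_ctrl += GROUP_WIDTH;
    }
}

fn main() {
    // Two groups of control bytes; buckets 1, 3 and 17 hold entries.
    let mut ctrl = [EMPTY; 2 * GROUP_WIDTH];
    ctrl[1] = 0x01;
    ctrl[3] = 0x2a;
    ctrl[17] = 0x07;
    assert_eq!(full_buckets(&ctrl), vec![1, 3, 17]);
}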
