Skip to content

Commit dfaddb7

Browse files
committed
Auto merge of #27807 - pczarn:arena-internals, r=bluss
Fixes #18037 "TypedArena cannot handle zero-sized types". Closes #17931 "improve chunk allocation scheme used by Arena / TypedArena". Closes #22847 "TypedArena should implement Send". - N.B. Arena cannot implement Send, since it may contain non-Send values. Closes #18471 "`Arena::alloc_copy_inner` (at least) should be renamed and made public." - Added `Arena::alloc_bytes`. Closes #18261 "support clearing TypedArena with the chunks preserved". - Only the largest chunk is preserved.
2 parents 69e1f57 + e2ccc4f commit dfaddb7

File tree

2 files changed

+577
-227
lines changed

2 files changed

+577
-227
lines changed

src/liballoc/raw_vec.rs

+107-11
Original file line number | Diff line number | Diff line change
@@ -240,6 +240,47 @@ impl<T> RawVec<T> {
240240
}
241241
}
242242

/// Attempts to double the size of the type's backing allocation in place. This is common
/// enough to want to do that it's easiest to just have a dedicated method. Slightly
/// more efficient logic can be provided for this than the general case.
///
/// Returns true if the reallocation attempt has succeeded, or false otherwise.
///
/// # Panics
///
/// * Panics if T is zero-sized on the assumption that you managed to exhaust
/// all `usize::MAX` slots in your imaginary buffer.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
#[inline(never)]
#[cold]
pub fn double_in_place(&mut self) -> bool {
    unsafe {
        let elem_size = mem::size_of::<T>();
        let align = mem::align_of::<T>();

        // since we set the capacity to usize::MAX when elem_size is
        // 0, getting to here necessarily means the RawVec is overfull.
        assert!(elem_size != 0, "capacity overflow");

        // Since we guarantee that we never allocate more than isize::MAX bytes,
        // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow
        let new_cap = 2 * self.cap;
        let new_alloc_size = new_cap * elem_size;

        // Rejects allocation sizes that would exceed the platform limit
        // (panics on 32-bit targets past isize::MAX, per the doc above).
        alloc_guard(new_alloc_size);
        // Ask the allocator to grow the existing block without moving it.
        // NOTE(review): presumably `size` is the usable size of the block
        // after the call (unchanged when in-place growth failed) — matches
        // how it is compared below; confirm against the `heap` API.
        let size = heap::reallocate_inplace(self.ptr() as *mut _,
                                            self.cap * elem_size,
                                            new_alloc_size,
                                            align);
        if size >= new_alloc_size {
            // We can't directly divide `size`.
            // (`new_cap` is already the element count, so no division by
            // `elem_size` is needed — or sound, since `size` may exceed
            // the requested byte count.)
            self.cap = new_cap;
        }
        // Success exactly when the block now holds at least the bytes we asked for.
        size >= new_alloc_size
    }
}
243284
/// Ensures that the buffer contains at least enough space to hold
244285
/// `used_cap + needed_extra_cap` elements. If it doesn't already,
245286
/// will reallocate the minimum possible amount of memory necessary.
@@ -300,6 +341,22 @@ impl<T> RawVec<T> {
300341
}
301342
}
302343

344+
/// Calculates the buffer's new size given that it'll hold `used_cap +
345+
/// needed_extra_cap` elements. This logic is used in amortized reserve methods.
346+
/// Returns `(new_capacity, new_alloc_size)`.
347+
fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) -> (usize, usize) {
348+
let elem_size = mem::size_of::<T>();
349+
// Nothing we can really do about these checks :(
350+
let required_cap = used_cap.checked_add(needed_extra_cap)
351+
.expect("capacity overflow");
352+
// Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
353+
let double_cap = self.cap * 2;
354+
// `double_cap` guarantees exponential growth.
355+
let new_cap = cmp::max(double_cap, required_cap);
356+
let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
357+
(new_cap, new_alloc_size)
358+
}
359+
303360
/// Ensures that the buffer contains at least enough space to hold
304361
/// `used_cap + needed_extra_cap` elements. If it doesn't already have
305362
/// enough capacity, will reallocate enough space plus comfortable slack
@@ -360,17 +417,7 @@ impl<T> RawVec<T> {
360417
return;
361418
}
362419

363-
// Nothing we can really do about these checks :(
364-
let required_cap = used_cap.checked_add(needed_extra_cap)
365-
.expect("capacity overflow");
366-
367-
// Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
368-
let double_cap = self.cap * 2;
369-
370-
// `double_cap` guarantees exponential growth.
371-
let new_cap = cmp::max(double_cap, required_cap);
372-
373-
let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
420+
let (new_cap, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap);
374421
// FIXME: may crash and burn on over-reserve
375422
alloc_guard(new_alloc_size);
376423

@@ -393,6 +440,55 @@ impl<T> RawVec<T> {
393440
}
394441
}
395442

/// Attempts to ensure that the buffer contains at least enough space to hold
/// `used_cap + needed_extra_cap` elements. If it doesn't already have
/// enough capacity, will reallocate in place enough space plus comfortable slack
/// space to get amortized `O(1)` behaviour. Will limit this behaviour
/// if it would needlessly cause itself to panic.
///
/// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behaviour of this function may break.
///
/// Returns true if the reallocation attempt has succeeded, or false otherwise.
///
/// # Panics
///
/// * Panics if the requested capacity exceeds `usize::MAX` bytes.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
    unsafe {
        let elem_size = mem::size_of::<T>();
        let align = mem::align_of::<T>();

        // NOTE: we don't early branch on ZSTs here because we want this
        // to actually catch "asking for more than usize::MAX" in that case.
        // If we make it past the first branch then we are guaranteed to
        // panic.
        // (For a ZST, `cap` is `usize::MAX`, so reaching `amortized_new_size`
        // means `used_cap + needed_extra_cap` overflowed its checked add.)

        // Don't actually need any more capacity. If the current `cap` is 0, we can't
        // reallocate in place.
        // Wrapping in case they give a bad `used_cap`
        if self.cap().wrapping_sub(used_cap) >= needed_extra_cap || self.cap == 0 {
            return false;
        }

        // Only the byte size is needed here; the element capacity is
        // re-derived from it after the reallocation below.
        let (_, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap);
        // FIXME: may crash and burn on over-reserve
        alloc_guard(new_alloc_size);

        // Ask the allocator to grow the existing block without moving it.
        // NOTE(review): presumably `size` is the usable size of the block
        // after the call (unchanged when in-place growth failed) — matches
        // the comparison below; confirm against the `heap` API.
        let size = heap::reallocate_inplace(self.ptr() as *mut _,
                                            self.cap * elem_size,
                                            new_alloc_size,
                                            align);
        if size >= new_alloc_size {
            // `new_alloc_size` came from `amortized_new_size`'s checked
            // multiply of capacity by `elem_size`, so this division is exact.
            // (`elem_size` is nonzero here: the ZST path panics earlier.)
            self.cap = new_alloc_size / elem_size;
        }
        // Success exactly when the block now holds at least the bytes we asked for.
        size >= new_alloc_size
    }
}
396492
/// Shrinks the allocation down to the specified amount. If the given amount
397493
/// is 0, actually completely deallocates.
398494
///

0 commit comments

Comments
 (0)