Skip to content

Commit ce82a7d

Browse files
authored
Fix formatting and broken links in documentation (#577)
1 parent 2117612 commit ce82a7d

File tree

6 files changed

+80
-45
lines changed

6 files changed

+80
-45
lines changed

src/util/alloc/allocator.rs

Lines changed: 10 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
118118
/// Return the [`VMThread`] associated with this allocator instance.
119119
fn get_tls(&self) -> VMThread;
120120

121-
/// Return the [`Space`] instance associated with this allocator instance.
121+
/// Return the [`Space`](crate::policy::space::Space) instance associated with this allocator instance.
122122
fn get_space(&self) -> &'static dyn Space<VM>;
123123

124124
/// Return the [`Plan`] instance that this allocator instance is associated with.
@@ -129,9 +129,9 @@ pub trait Allocator<VM: VMBinding>: Downcast {
129129
fn does_thread_local_allocation(&self) -> bool;
130130

131131
/// Return at which granularity the allocator acquires memory from the global space and use
132-
/// them as thread local buffer. For example, the [`BumpAllocator`] acquires memory at 32KB
132+
/// them as thread local buffer. For example, the [`BumpAllocator`](crate::util::alloc::BumpAllocator) acquires memory at 32KB
133133
/// blocks. Depending on the actual size for the current object, they always acquire memory of
134-
/// N*32KB (N>=1). Thus the [`BumpAllocator`] returns 32KB for this method. Only allocators
134+
/// N*32KB (N>=1). Thus the [`BumpAllocator`](crate::util::alloc::BumpAllocator) returns 32KB for this method. Only allocators
135135
/// that do thread local allocation need to implement this method.
136136
fn get_thread_local_buffer_granularity(&self) -> usize {
137137
assert!(self.does_thread_local_allocation(), "An allocator that does not thread local allocation does not have a buffer granularity.");
@@ -140,15 +140,15 @@ pub trait Allocator<VM: VMBinding>: Downcast {
140140

141141
/// An allocation attempt. The implementation of this function depends on the allocator used.
142142
/// If an allocator supports thread local allocations, then the allocation will be serviced
143-
/// from its TLAB, otherwise it will default to using the slowpath, i.e. [`alloc_slow`].
143+
/// from its TLAB, otherwise it will default to using the slowpath, i.e. [`alloc_slow`](Allocator::alloc_slow).
144144
///
145145
/// Note that in the case where the VM is out of memory, we invoke
146146
/// [`Collection::out_of_memory`] to inform the binding and then return a null pointer back to
147147
/// it. We have no assumptions on whether the VM will continue executing or abort immediately.
148148
///
149149
/// An allocator needs to make sure the object reference for the returned address is in the same
150150
/// chunk as the returned address (so the side metadata and the SFT for an object reference is valid).
151-
/// See [`crate::util::alloc::object_ref_guard`].
151+
/// See the [`crate::util::alloc::object_ref_guard`] module.
152152
///
153153
/// Arguments:
154154
/// * `size`: the allocation size in bytes.
@@ -170,9 +170,9 @@ pub trait Allocator<VM: VMBinding>: Downcast {
170170

171171
/// Slowpath allocation attempt. This function executes the actual slowpath allocation. A
172172
/// slowpath allocation in MMTk attempts to allocate the object using the per-allocator
173-
/// definition of [`alloc_slow_once`]. This function also accounts for increasing the
173+
/// definition of [`alloc_slow_once`](Allocator::alloc_slow_once). This function also accounts for increasing the
174174
/// allocation bytes in order to support stress testing. In case precise stress testing is
175-
/// being used, the [`alloc_slow_once_precise_stress`] function is used instead.
175+
/// being used, the [`alloc_slow_once_precise_stress`](Allocator::alloc_slow_once_precise_stress) function is used instead.
176176
///
177177
/// Note that in the case where the VM is out of memory, we invoke
178178
/// [`Collection::out_of_memory`] with a [`AllocationError::HeapOutOfMemory`] error to inform
@@ -194,6 +194,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
194194
// Information about the previous collection.
195195
let mut emergency_collection = false;
196196
let mut previous_result_zero = false;
197+
197198
loop {
198199
// Try to allocate using the slow path
199200
let result = if is_mutator && stress_test && plan.is_precise_stress() {
@@ -224,13 +225,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
224225
plan.allocation_success.store(true, Ordering::SeqCst);
225226
}
226227

227-
// When a GC occurs, the resultant address provided by `acquire()` is 0x0.
228-
// Hence, another iteration of this loop occurs. In such a case, the second
229-
// iteration tries to allocate again, and if is successful, then the allocation
230-
// bytes are updated. However, this leads to double counting of the allocation:
231-
// (i) by the original alloc_slow_inline(); and (ii) by the alloc_slow_inline()
232-
// called by acquire(). In order to not double count the allocation, we only
233-
// update allocation bytes if the previous result wasn't 0x0.
228+
// Only update the allocation bytes if we haven't failed a previous allocation in this loop
234229
if stress_test && self.get_plan().is_initialized() && !previous_result_zero {
235230
let allocated_size =
236231
if plan.is_precise_stress() || !self.does_thread_local_allocation() {
@@ -302,7 +297,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
302297
}
303298
}
304299

305-
/// Single slow path allocation attempt. This is called by [`alloc_slow_inline`]. The
300+
/// Single slow path allocation attempt. This is called by [`alloc_slow_inline`](Allocator::alloc_slow_inline). The
306301
/// implementation of this function depends on the allocator used. Generally, if an allocator
307302
/// supports thread local allocations, it will try to allocate more TLAB space here. If it
308303
/// doesn't, then (generally) the allocator simply allocates enough space for the current

src/util/alloc/bumpallocator.rs

Lines changed: 16 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -16,10 +16,15 @@ const BLOCK_MASK: usize = BLOCK_SIZE - 1;
1616

1717
#[repr(C)]
1818
pub struct BumpAllocator<VM: VMBinding> {
19+
/// [`VMThread`] associated with this allocator instance
1920
pub tls: VMThread,
21+
/// Current cursor for bump pointer
2022
cursor: Address,
23+
/// Limit for bump pointer
2124
limit: Address,
25+
/// [`Space`](crate::policy::space::Space) instance associated with this allocator instance.
2226
space: &'static dyn Space<VM>,
27+
/// [`Plan`] instance that this allocator instance is associated with.
2328
plan: &'static dyn Plan<VM = VM>,
2429
}
2530

@@ -44,12 +49,15 @@ impl<VM: VMBinding> Allocator<VM> for BumpAllocator<VM> {
4449
fn get_space(&self) -> &'static dyn Space<VM> {
4550
self.space
4651
}
52+
4753
fn get_plan(&self) -> &'static dyn Plan<VM = VM> {
4854
self.plan
4955
}
56+
5057
fn does_thread_local_allocation(&self) -> bool {
5158
true
5259
}
60+
5361
fn get_thread_local_buffer_granularity(&self) -> usize {
5462
BLOCK_SIZE
5563
}
@@ -81,11 +89,14 @@ impl<VM: VMBinding> Allocator<VM> for BumpAllocator<VM> {
8189
self.acquire_block(size, align, offset, false)
8290
}
8391

84-
// Slow path for allocation if the precise stress test has been enabled.
85-
// It works by manipulating the limit to be below the cursor always.
86-
// Performs three kinds of allocations: (i) if the hard limit has been met;
87-
// (ii) the bump pointer semantics from the fastpath; and (iii) if the stress
88-
// factor has been crossed.
92+
/// Slow path for allocation if precise stress testing has been enabled.
93+
/// It works by manipulating the limit to be always below the cursor.
94+
/// Can have three different cases:
95+
/// - acquires a new block if the hard limit has been met;
96+
/// - allocates an object using the bump pointer semantics from the
97+
/// fastpath if there is sufficient space; and
98+
/// - does not allocate an object but forces a poll for GC if the stress
99+
/// factor has been crossed.
89100
fn alloc_slow_once_precise_stress(
90101
&mut self,
91102
size: usize,

src/util/alloc/immix_allocator.rs

Lines changed: 38 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,9 @@ pub struct ImmixAllocator<VM: VMBinding> {
1818
cursor: Address,
1919
/// Limit for bump pointer
2020
limit: Address,
21+
/// [`Space`](crate::policy::space::Space) instance associated with this allocator instance.
2122
space: &'static ImmixSpace<VM>,
23+
/// [`Plan`] instance that this allocator instance is associated with.
2224
plan: &'static dyn Plan<VM = VM>,
2325
/// *unused*
2426
hot: bool,
@@ -207,7 +209,7 @@ impl<VM: VMBinding> ImmixAllocator<VM> {
207209
self.space
208210
}
209211

210-
/// Large-object (larger than a line) bump alloaction.
212+
/// Large-object (larger than a line) bump allocation.
211213
fn overflow_alloc(&mut self, size: usize, align: usize, offset: isize) -> Address {
212214
trace!("{:?}: overflow_alloc", self.tls);
213215
let start = align_allocation_no_fill::<VM>(self.large_cursor, align, offset);
@@ -293,7 +295,12 @@ impl<VM: VMBinding> ImmixAllocator<VM> {
293295
match self.immix_space().get_clean_block(self.tls, self.copy) {
294296
None => Address::ZERO,
295297
Some(block) => {
296-
trace!("{:?}: Acquired a new block {:?}", self.tls, block);
298+
trace!(
299+
"{:?}: Acquired a new block {:?} -> {:?}",
300+
self.tls,
301+
block.start(),
302+
block.end()
303+
);
297304
if self.request_for_large {
298305
self.large_cursor = block.start();
299306
self.large_limit = adjust_thread_local_buffer_limit::<VM>(block.end());
@@ -306,54 +313,65 @@ impl<VM: VMBinding> ImmixAllocator<VM> {
306313
}
307314
}
308315

309-
/// Set fake limits for the bump allocation for stress tests. The fake limit is the remaining thread local buffer size,
310-
/// which should be always smaller than the bump cursor.
311-
/// This method may be reentrant. We need to check before setting the values.
316+
/// Set fake limits for the bump allocation for stress tests. The fake limit is the remaining
317+
/// thread local buffer size, which should be always smaller than the bump cursor. This method
318+
/// may be reentrant. We need to check before setting the values.
312319
fn set_limit_for_stress(&mut self) {
313320
if self.cursor < self.limit {
321+
let old_limit = self.limit;
314322
let new_limit = unsafe { Address::from_usize(self.limit - self.cursor) };
315323
self.limit = new_limit;
316324
trace!(
317-
"{:?}: set_limit_for_stress. normal {} -> {}",
325+
"{:?}: set_limit_for_stress. normal c {} l {} -> {}",
318326
self.tls,
319-
self.limit,
320-
new_limit
327+
self.cursor,
328+
old_limit,
329+
new_limit,
321330
);
322331
}
332+
323333
if self.large_cursor < self.large_limit {
334+
let old_lg_limit = self.large_limit;
324335
let new_lg_limit = unsafe { Address::from_usize(self.large_limit - self.large_cursor) };
325336
self.large_limit = new_lg_limit;
326337
trace!(
327-
"{:?}: set_limit_for_stress. large {} -> {}",
338+
"{:?}: set_limit_for_stress. large c {} l {} -> {}",
328339
self.tls,
329-
self.large_limit,
330-
new_lg_limit
340+
self.large_cursor,
341+
old_lg_limit,
342+
new_lg_limit,
331343
);
332344
}
333345
}
334346

335-
/// Restore the real limits for the bump allocation so we can do a properly thread local allocation.
336-
/// The fake limit is the remaining thread local buffer size, and we restore the actual limit from the size and the cursor.
337-
/// This method may be reentrant. We need to check before setting the values.
347+
/// Restore the real limits for the bump allocation so we can properly do a thread local
348+
/// allocation. The fake limit is the remaining thread local buffer size, and we restore the
349+
/// actual limit from the size and the cursor. This method may be reentrant. We need to check
350+
/// before setting the values.
338351
fn restore_limit_for_stress(&mut self) {
339352
if self.limit < self.cursor {
353+
let old_limit = self.limit;
340354
let new_limit = self.cursor + self.limit.as_usize();
341355
self.limit = new_limit;
342356
trace!(
343-
"{:?}: restore_limit_for_stress. normal {} -> {}",
357+
"{:?}: restore_limit_for_stress. normal c {} l {} -> {}",
344358
self.tls,
345-
self.limit,
346-
new_limit
359+
self.cursor,
360+
old_limit,
361+
new_limit,
347362
);
348363
}
364+
349365
if self.large_limit < self.large_cursor {
366+
let old_lg_limit = self.large_limit;
350367
let new_lg_limit = self.large_cursor + self.large_limit.as_usize();
351368
self.large_limit = new_lg_limit;
352369
trace!(
353-
"{:?}: restore_limit_for_stress. large {} -> {}",
370+
"{:?}: restore_limit_for_stress. large c {} l {} -> {}",
354371
self.tls,
355-
self.large_limit,
356-
new_lg_limit
372+
self.large_cursor,
373+
old_lg_limit,
374+
new_lg_limit,
357375
);
358376
}
359377
}

src/util/alloc/large_object_allocator.rs

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,11 @@ use crate::vm::VMBinding;
88

99
#[repr(C)]
1010
pub struct LargeObjectAllocator<VM: VMBinding> {
11+
/// [`VMThread`] associated with this allocator instance
1112
pub tls: VMThread,
13+
/// [`Space`](crate::policy::space::Space) instance associated with this allocator instance.
1214
space: &'static LargeObjectSpace<VM>,
15+
/// [`Plan`] instance that this allocator instance is associated with.
1316
plan: &'static dyn Plan<VM = VM>,
1417
}
1518

src/util/alloc/malloc_allocator.rs

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,18 +9,23 @@ use crate::Plan;
99

1010
#[repr(C)]
1111
pub struct MallocAllocator<VM: VMBinding> {
12+
/// [`VMThread`] associated with this allocator instance
1213
pub tls: VMThread,
14+
/// [`Space`](crate::policy::space::Space) instance associated with this allocator instance.
1315
space: &'static MallocSpace<VM>,
16+
/// [`Plan`] instance that this allocator instance is associated with.
1417
plan: &'static dyn Plan<VM = VM>,
1518
}
1619

1720
impl<VM: VMBinding> Allocator<VM> for MallocAllocator<VM> {
1821
fn get_space(&self) -> &'static dyn Space<VM> {
1922
self.space as &'static dyn Space<VM>
2023
}
24+
2125
fn get_plan(&self) -> &'static dyn Plan<VM = VM> {
2226
self.plan
2327
}
28+
2429
fn alloc(&mut self, size: usize, align: usize, offset: isize) -> Address {
2530
self.alloc_slow(size, align, offset)
2631
}

src/util/alloc/markcompact_allocator.rs

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -66,11 +66,14 @@ impl<VM: VMBinding> Allocator<VM> for MarkCompactAllocator<VM> {
6666
self.bump_allocator.alloc_slow_once(size, align, offset)
6767
}
6868

69-
// Slow path for allocation if the precise stress test has been enabled.
70-
// It works by manipulating the limit to be below the cursor always.
71-
// Performs three kinds of allocations: (i) if the hard limit has been met;
72-
// (ii) the bump pointer semantics from the fastpath; and (iii) if the stress
73-
// factor has been crossed.
69+
/// Slow path for allocation if precise stress testing has been enabled.
70+
/// It works by manipulating the limit to be always below the cursor.
71+
/// Can have three different cases:
72+
/// - acquires a new block if the hard limit has been met;
73+
/// - allocates an object using the bump pointer semantics from the
74+
/// fastpath if there is sufficient space; and
75+
/// - does not allocate an object but forces a poll for GC if the stress
76+
/// factor has been crossed.
7477
fn alloc_slow_once_precise_stress(
7578
&mut self,
7679
size: usize,

0 commit comments

Comments (0)