@@ -118,7 +118,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
/// Return the [`VMThread`] associated with this allocator instance.
fn get_tls(&self) -> VMThread;
- /// Return the [`Space`] instance associated with this allocator instance.
+ /// Return the [`Space`](crate::policy::space::Space) instance associated with this allocator instance.
fn get_space(&self) -> &'static dyn Space<VM>;
/// Return the [`Plan`] instance that this allocator instance is associated with.
@@ -129,9 +129,9 @@ pub trait Allocator<VM: VMBinding>: Downcast {
fn does_thread_local_allocation(&self) -> bool;
/// Return the granularity at which the allocator acquires memory from the global space and uses
- /// it as a thread-local buffer. For example, the [`BumpAllocator`] acquires memory in 32KB
+ /// it as a thread-local buffer. For example, the [`BumpAllocator`](crate::util::alloc::BumpAllocator) acquires memory in 32KB
/// blocks. Depending on the actual size of the current object, it always acquires memory of
- /// N*32KB (N>=1). Thus the [`BumpAllocator`] returns 32KB for this method. Only allocators
+ /// N*32KB (N>=1). Thus the [`BumpAllocator`](crate::util::alloc::BumpAllocator) returns 32KB for this method. Only allocators
/// that do thread local allocation need to implement this method.
fn get_thread_local_buffer_granularity(&self) -> usize {
assert!(self.does_thread_local_allocation(), "An allocator that does not do thread local allocation does not have a buffer granularity.");
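The granularity contract spelled out in the hunk above (whole 32KB granules, however large the object) boils down to a small rounding rule. The sketch below is a standalone illustration of that arithmetic, not MMTk code; the constant and function name are invented for the example.

```rust
// Standalone sketch of the N*32KB rule described above (not MMTk code).
const GRANULARITY: usize = 32 * 1024; // 32KB, as in the BumpAllocator example

/// Round an allocation request up to N * GRANULARITY, with N >= 1.
fn acquired_bytes(object_size: usize) -> usize {
    let n = (object_size + GRANULARITY - 1) / GRANULARITY;
    n.max(1) * GRANULARITY
}

fn main() {
    assert_eq!(acquired_bytes(24), 32 * 1024);        // small object: one granule
    assert_eq!(acquired_bytes(32 * 1024), 32 * 1024); // exact fit: still one granule
    assert_eq!(acquired_bytes(40 * 1024), 64 * 1024); // larger object: two granules
}
```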
@@ -140,15 +140,15 @@ pub trait Allocator<VM: VMBinding>: Downcast {
/// An allocation attempt. The implementation of this function depends on the allocator used.
/// If an allocator supports thread local allocations, then the allocation will be serviced
- /// from its TLAB, otherwise it will default to using the slowpath, i.e. [`alloc_slow`].
+ /// from its TLAB, otherwise it will default to using the slowpath, i.e. [`alloc_slow`](Allocator::alloc_slow).
///
/// Note that in the case where the VM is out of memory, we invoke
/// [`Collection::out_of_memory`] to inform the binding and then return a null pointer back to
/// it. We make no assumption about whether the VM will continue executing or abort immediately.
///
/// An allocator needs to make sure the object reference for the returned address is in the same
/// chunk as the returned address (so the side metadata and the SFT for the object reference are valid).
- /// See [`crate::util::alloc::object_ref_guard`].
+ /// See [`crate::util::alloc::object_ref_guard`](util/alloc/object_ref_guard).
///
/// Arguments:
/// * `size`: the allocation size in bytes.
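The `alloc` documentation above describes a fast path served from the TLAB, a fallback to the slow path, and a null address returned on out-of-memory. The toy below mirrors that control flow under simplifying assumptions: plain `usize` values stand in for MMTk's `Address`, and the struct, field names, and buffer size are invented for illustration rather than taken from the real `Allocator` implementation.

```rust
// Toy bump allocator mirroring the fast/slow split described above (not MMTk code).
// A zero value plays the role of the null Address returned on out-of-memory.
struct ToyAllocator {
    cursor: usize,   // next free byte in the current thread-local buffer
    limit: usize,    // end of the current thread-local buffer
    heap_end: usize, // end of the whole toy heap
}

impl ToyAllocator {
    /// Fast path: bump the cursor while the thread-local buffer has room.
    fn alloc(&mut self, size: usize) -> usize {
        if self.cursor + size <= self.limit {
            let result = self.cursor;
            self.cursor += size;
            result
        } else {
            self.alloc_slow(size)
        }
    }

    /// Slow path: acquire a fresh buffer, or return 0 (null) when the heap is
    /// exhausted. Real MMTk additionally informs the binding via
    /// `Collection::out_of_memory` before returning the null address.
    fn alloc_slow(&mut self, size: usize) -> usize {
        const BUFFER: usize = 32 * 1024;
        let needed = size.max(BUFFER);
        if self.heap_end - self.limit < needed {
            return 0; // out of memory: the caller decides how to react
        }
        self.cursor = self.limit;
        self.limit += needed;
        self.alloc(size) // retry on the fast path, which now succeeds
    }
}

fn main() {
    const HEAP_START: usize = 0x10000;
    let mut a = ToyAllocator {
        cursor: HEAP_START,
        limit: HEAP_START,
        heap_end: HEAP_START + 64 * 1024, // a 64KB toy heap
    };
    assert_ne!(a.alloc(24), 0);      // plenty of space: a real address comes back
    assert_eq!(a.alloc(1 << 20), 0); // 1MB into a 64KB heap: null address signals OOM
}
```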
@@ -170,9 +170,9 @@ pub trait Allocator<VM: VMBinding>: Downcast {
/// Slowpath allocation attempt. This function executes the actual slowpath allocation. A
/// slowpath allocation in MMTk attempts to allocate the object using the per-allocator
- /// definition of [`alloc_slow_once`]. This function also accounts for increasing the
+ /// definition of [`alloc_slow_once`](Allocator::alloc_slow_once). This function also accounts for increasing the
/// allocation bytes in order to support stress testing. In case precise stress testing is
- /// being used, the [`alloc_slow_once_precise_stress`] function is used instead.
+ /// being used, the [`alloc_slow_once_precise_stress`](Allocator::alloc_slow_once_precise_stress) function is used instead.
///
/// Note that in the case where the VM is out of memory, we invoke
/// [`Collection::out_of_memory`] with an [`AllocationError::HeapOutOfMemory`] error to inform
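The layering described in this hunk, a shared slow path that defers to the per-allocator `alloc_slow_once` (or its precise-stress variant) and tells the binding about heap exhaustion, can be sketched as a toy trait. Everything below is illustrative: the trait, the simplified signatures, and the `report_oom` hook are assumptions standing in for the real `Allocator` methods and `Collection::out_of_memory`.

```rust
// Toy sketch of the slow-path layering described above (not MMTk's real trait).
// Addresses are plain usize values; 0 stands in for the null Address.
trait ToySlowPath {
    /// Per-allocator slow path, e.g. acquire a new thread-local buffer.
    fn alloc_slow_once(&mut self, size: usize) -> usize;

    /// Per-allocator variant used when precise stress testing is enabled.
    fn alloc_slow_once_precise_stress(&mut self, size: usize) -> usize;

    /// Hypothetical stand-in for notifying the binding about heap exhaustion
    /// (the comment above says the real code uses `Collection::out_of_memory`).
    fn report_oom(&mut self);

    /// Shared driver: pick the right per-allocator slow path and translate a
    /// failure into "notify the binding, then hand back the null address".
    fn alloc_slow(&mut self, size: usize, precise_stress: bool) -> usize {
        let result = if precise_stress {
            self.alloc_slow_once_precise_stress(size)
        } else {
            self.alloc_slow_once(size)
        };
        if result == 0 {
            self.report_oom();
        }
        result
    }
}

// Minimal implementation that is always out of memory, to exercise the driver.
struct AlwaysFull;
impl ToySlowPath for AlwaysFull {
    fn alloc_slow_once(&mut self, _size: usize) -> usize { 0 }
    fn alloc_slow_once_precise_stress(&mut self, _size: usize) -> usize { 0 }
    fn report_oom(&mut self) { eprintln!("toy binding notified of OOM"); }
}

fn main() {
    let mut a = AlwaysFull;
    assert_eq!(a.alloc_slow(64, false), 0); // failure surfaces as the null address
}
```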
@@ -194,6 +194,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
// Information about the previous collection.
let mut emergency_collection = false;
let mut previous_result_zero = false;
+
loop {
// Try to allocate using the slow path
let result = if is_mutator && stress_test && plan.is_precise_stress() {
@@ -224,13 +225,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
plan.allocation_success.store(true, Ordering::SeqCst);
}
- // When a GC occurs, the resultant address provided by `acquire()` is 0x0.
- // Hence, another iteration of this loop occurs. In such a case, the second
- // iteration tries to allocate again, and if it is successful, then the allocation
- // bytes are updated. However, this leads to double counting of the allocation:
- // (i) by the original alloc_slow_inline(); and (ii) by the alloc_slow_inline()
- // called by acquire(). In order to not double count the allocation, we only
- // update allocation bytes if the previous result wasn't 0x0.
+ // Only update the allocation bytes if we haven't failed a previous allocation in this loop
if stress_test && self.get_plan().is_initialized() && !previous_result_zero {
let allocated_size =
if plan.is_precise_stress() || !self.does_thread_local_allocation() {
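The replacement comment above is much terser than the removed explanation, so here is a deliberately simplified, standalone loop showing what the `previous_result_zero` guard achieves. Per the removed comment, a retry after a GC is counted again by the `alloc_slow_inline()` called from `acquire()`, so the outer loop skips its own accounting for that iteration. The simulated results and variable names below are illustrative only, not the real control flow.

```rust
// Simplified model of the retry loop above (not the real control flow): the
// first slow-path attempt returns the null address because a GC occurred, and
// the loop retries. The retry is counted by the nested slow path as well, so
// the outer loop must skip its own accounting for that iteration -- which is
// exactly what the `previous_result_zero` guard does.
fn main() {
    let size = 64usize;
    let mut results = vec![0usize, 0x10000]; // attempt 1: GC (null), attempt 2: success
    let mut allocation_bytes = 0usize;
    let mut previous_result_zero = false;

    loop {
        let result = results.remove(0); // stand-in for the per-allocator slow path

        // Same guard as above: only count when the previous attempt did not fail.
        if !previous_result_zero {
            allocation_bytes += size;
        }

        if result != 0 {
            break;
        }
        previous_result_zero = true;
    }

    // Two iterations ran, but the allocation was only counted once.
    assert_eq!(allocation_bytes, size);
}
```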
@@ -302,7 +297,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
}
}
- /// Single slow path allocation attempt. This is called by [`alloc_slow_inline`]. The
+ /// Single slow path allocation attempt. This is called by [`alloc_slow_inline`](Allocator::alloc_slow_inline). The
/// implementation of this function depends on the allocator used. Generally, if an allocator
/// supports thread local allocations, it will try to allocate more TLAB space here. If it
/// doesn't, then (generally) the allocator simply allocates enough space for the current
0 commit comments