Skip to content

Commit 1c64dcb

Browse files
committed
Make chunk map as global side metadata
1 parent b7b5988 commit 1c64dcb

File tree

7 files changed

+85
-59
lines changed

7 files changed

+85
-59
lines changed

src/policy/immix/immixspace.rs

+5-4
Original file line numberDiff line numberDiff line change
@@ -299,6 +299,7 @@ impl<VM: VMBinding> ImmixSpace<VM> {
299299
let scheduler = args.scheduler.clone();
300300
let common =
301301
CommonSpace::new(args.into_policy_args(true, false, Self::side_metadata_specs()));
302+
let space_index = common.descriptor.get_index();
302303
ImmixSpace {
303304
pr: if common.vmrequest.is_discontiguous() {
304305
BlockPageResource::new_discontiguous(
@@ -316,7 +317,7 @@ impl<VM: VMBinding> ImmixSpace<VM> {
316317
)
317318
},
318319
common,
319-
chunk_map: ChunkMap::new(),
320+
chunk_map: ChunkMap::new(space_index),
320321
line_mark_state: AtomicU8::new(Line::RESET_MARK_STATE),
321322
line_unavail_state: AtomicU8::new(Line::RESET_MARK_STATE),
322323
lines_consumed: AtomicUsize::new(0),
@@ -524,7 +525,7 @@ impl<VM: VMBinding> ImmixSpace<VM> {
524525
self.defrag.notify_new_clean_block(copy);
525526
let block = Block::from_aligned_address(block_address);
526527
block.init(copy);
527-
self.chunk_map.set(block.chunk(), ChunkState::allocated(self.common().descriptor.get_index()));
528+
self.chunk_map.set_allocated(block.chunk(), true);
528529
self.lines_consumed
529530
.fetch_add(Block::LINES, Ordering::SeqCst);
530531
Some(block)
@@ -899,7 +900,7 @@ struct SweepChunk<VM: VMBinding> {
899900

900901
impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
901902
fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
902-
assert!(self.space.chunk_map.get(self.chunk).is_allocated());
903+
assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated());
903904

904905
let mut histogram = self.space.defrag.new_histogram();
905906
let line_mark_state = if super::BLOCK_ONLY {
@@ -950,7 +951,7 @@ impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
950951
probe!(mmtk, sweep_chunk, allocated_blocks);
951952
// Set this chunk as free if there are no live blocks.
952953
if allocated_blocks == 0 {
953-
self.space.chunk_map.set(self.chunk, ChunkState::free())
954+
self.space.chunk_map.set_allocated(self.chunk, false)
954955
}
955956
self.space.defrag.add_completed_mark_histogram(histogram);
956957
self.epilogue.finish_one_work_packet();

src/policy/marksweepspace/native_ms/global.rs

+7-6
Original file line numberDiff line numberDiff line change
@@ -305,6 +305,7 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
305305
])
306306
};
307307
let common = CommonSpace::new(args.into_policy_args(false, false, local_specs));
308+
let space_index = common.descriptor.get_index();
308309
MarkSweepSpace {
309310
pr: if is_discontiguous {
310311
BlockPageResource::new_discontiguous(
@@ -322,7 +323,7 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
322323
)
323324
},
324325
common,
325-
chunk_map: ChunkMap::new(),
326+
chunk_map: ChunkMap::new(space_index),
326327
scheduler,
327328
abandoned: Mutex::new(AbandonedBlockLists::new()),
328329
abandoned_in_gc: Mutex::new(AbandonedBlockLists::new()),
@@ -402,7 +403,7 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
402403

403404
pub fn record_new_block(&self, block: Block) {
404405
block.init();
405-
self.chunk_map.set(block.chunk(), ChunkState::allocated(self.common.descriptor.get_index()));
406+
self.chunk_map.set_allocated(block.chunk(), true);
406407
}
407408

408409
pub fn prepare(&mut self, full_heap: bool) {
@@ -567,7 +568,7 @@ struct PrepareChunkMap<VM: VMBinding> {
567568

568569
impl<VM: VMBinding> GCWork<VM> for PrepareChunkMap<VM> {
569570
fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
570-
debug_assert!(self.space.chunk_map.get(self.chunk).is_allocated());
571+
debug_assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated());
571572
// number of allocated blocks.
572573
let mut n_occupied_blocks = 0;
573574
self.chunk
@@ -581,7 +582,7 @@ impl<VM: VMBinding> GCWork<VM> for PrepareChunkMap<VM> {
581582
});
582583
if n_occupied_blocks == 0 {
583584
// Set this chunk as free if there are no live blocks.
584-
self.space.chunk_map.set(self.chunk, ChunkState::free())
585+
self.space.chunk_map.set_allocated(self.chunk, false)
585586
} else {
586587
// Otherwise this chunk is occupied, and we reset the mark bit if it is on the side.
587588
if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC {
@@ -617,7 +618,7 @@ struct SweepChunk<VM: VMBinding> {
617618

618619
impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
619620
fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
620-
assert!(self.space.chunk_map.get(self.chunk).is_allocated());
621+
assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated());
621622

622623
// number of allocated blocks.
623624
let mut allocated_blocks = 0;
@@ -636,7 +637,7 @@ impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
636637
probe!(mmtk, sweep_chunk, allocated_blocks);
637638
// Set this chunk as free if there are no live blocks.
638639
if allocated_blocks == 0 {
639-
self.space.chunk_map.set(self.chunk, ChunkState::free());
640+
self.space.chunk_map.set_allocated(self.chunk, false);
640641
}
641642
self.epilogue.finish_one_work_packet();
642643
}

src/util/heap/chunk_map.rs

+58-27
Original file line numberDiff line numberDiff line change
@@ -44,29 +44,35 @@ impl Chunk {
4444
}
4545
}
4646

47-
/// Chunk allocation state
47+
/// The allocation state for a chunk in the chunk map. It includes whether each chunk is allocated or free, and the space the chunk belongs to.
4848
/// Highest bit: 0 = free, 1 = allocated
4949
/// Lower 4 bits: Space index (0-15)
5050
#[repr(transparent)]
5151
#[derive(PartialEq, Clone, Copy)]
5252
pub struct ChunkState(u8);
5353

5454
impl ChunkState {
55+
/// Create a new ChunkState that represents being allocated in the given space
5556
pub fn allocated(space_index: usize) -> ChunkState {
5657
debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
5758
let mut encode = space_index as u8;
5859
encode |= 0x80;
5960
ChunkState(encode)
6061
}
61-
pub fn free() -> ChunkState {
62-
ChunkState(0)
62+
/// Create a new ChunkState that represents being free in the given space
63+
pub fn free(space_index: usize) -> ChunkState {
64+
debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
65+
ChunkState(space_index as u8)
6366
}
67+
/// Is the chunk free?
6468
pub fn is_free(&self) -> bool {
6569
self.0 & 0x80 == 0
6670
}
71+
/// Is the chunk allocated?
6772
pub fn is_allocated(&self) -> bool {
6873
!self.is_free()
6974
}
75+
/// Get the space index of the chunk
7076
pub fn get_space_index(&self) -> usize {
7177
debug_assert!(self.is_allocated());
7278
let index = (self.0 & 0x0F) as usize;
@@ -78,17 +84,26 @@ impl ChunkState {
7884
impl std::fmt::Debug for ChunkState {
7985
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
8086
if self.is_free() {
81-
write!(f, "Free")
87+
write!(f, "Free({})", self.get_space_index())
8288
} else {
83-
write!(f, "Allocated in space {}", self.get_space_index())
89+
write!(f, "Allocated({})", self.get_space_index())
8490
}
8591
}
8692
}
8793

8894
/// A byte-map to record all the allocated chunks.
8995
/// A plan can use this to maintain records for the chunks that they used, and the states of the chunks.
90-
/// Any plan that uses the chunk map should include the `ALLOC_TABLE` spec in their local sidemetadata specs
96+
/// The `ALLOC_TABLE` spec is a global side metadata spec and is added automatically for every plan (see `SideMetadataContext`), so plans do not need to include it themselves.
97+
///
98+
/// A chunk map is created for a space (identified by the space index), and will only update or list chunks for that space.
9199
pub struct ChunkMap {
100+
/// The space that uses this chunk map.
101+
space_index: usize,
102+
/// The range of chunks that are used by the space. The range only records the lowest chunk and the highest chunk.
103+
/// All the chunks that are used for the space are within the range, but not all the chunks in the range
104+
/// are used for the space. Spaces may be discontiguous, thus the range may include chunks that do not belong to the space.
105+
/// We need to use the space index in the chunk map and the space index encoded with the chunk state to know if
106+
/// the chunk belongs to the current space.
92107
chunk_range: Mutex<Range<Chunk>>,
93108
}
94109

@@ -97,24 +112,35 @@ impl ChunkMap {
97112
pub const ALLOC_TABLE: SideMetadataSpec =
98113
crate::util::metadata::side_metadata::spec_defs::CHUNK_MARK;
99114

100-
pub fn new() -> Self {
115+
pub fn new(space_index: usize) -> Self {
101116
Self {
117+
space_index,
102118
chunk_range: Mutex::new(Chunk::ZERO..Chunk::ZERO),
103119
}
104120
}
105121

106-
/// Set chunk state
107-
pub fn set(&self, chunk: Chunk, state: ChunkState) {
122+
/// Set a chunk as allocated, or as free.
123+
pub fn set_allocated(&self, chunk: Chunk, allocated: bool) {
124+
let state = if allocated {
125+
ChunkState::allocated(self.space_index)
126+
} else {
127+
ChunkState::free(self.space_index)
128+
};
108129
// Do nothing if the chunk is already in the expected state.
109-
if self.get(chunk) == state {
130+
if self.get_any(chunk) == state {
110131
return;
111132
}
112133
#[cfg(debug_assertions)]
113134
{
114-
let old_state = self.get(chunk);
115-
if state.is_allocated() {
116-
assert!(old_state.is_free() || old_state.get_space_index() == state.get_space_index(), "Chunk {:?}: old state {:?}, new state {:?}. Cannot set to new state.", chunk, old_state, state);
117-
}
135+
let old_state = self.get_any(chunk);
136+
// If a chunk is free, any space may use it. If a chunk is not free, only the current space may update its state.
137+
assert!(
138+
old_state.is_free() || old_state.get_space_index() == state.get_space_index(),
139+
"Chunk {:?}: old state {:?}, new state {:?}. Cannot set to new state.",
140+
chunk,
141+
old_state,
142+
state
143+
);
118144
}
119145
// Update alloc byte
120146
unsafe { Self::ALLOC_TABLE.store::<u8>(chunk.start(), state.0) };
@@ -134,16 +160,30 @@ impl ChunkMap {
134160
}
135161
}
136162

137-
/// Get chunk state
138-
pub fn get(&self, chunk: Chunk) -> ChunkState {
163+
/// Get chunk state. Return None if the chunk does not belong to the space.
164+
pub fn get(&self, chunk: Chunk) -> Option<ChunkState> {
165+
let state = self.get_any(chunk);
166+
(state.get_space_index() == self.space_index).then_some(state)
167+
}
168+
169+
/// Get chunk state, regardless of the space. This should always be private.
170+
fn get_any(&self, chunk: Chunk) -> ChunkState {
139171
let byte = unsafe { Self::ALLOC_TABLE.load::<u8>(chunk.start()) };
140172
ChunkState(byte)
141173
}
142174

143175
/// A range of all chunks in the heap.
144-
pub fn all_chunks(&self) -> RegionIterator<Chunk> {
176+
pub fn all_chunks(&self) -> impl Iterator<Item = Chunk> + use<'_> {
177+
let chunk_range = self.chunk_range.lock();
178+
RegionIterator::<Chunk>::new(chunk_range.start, chunk_range.end)
179+
.filter(|c| self.get(*c).is_some())
180+
}
181+
182+
/// A range of all the allocated chunks in the heap that belong to this space.
183+
pub fn all_allocated_chunks(&self) -> impl Iterator<Item = Chunk> + use<'_> {
145184
let chunk_range = self.chunk_range.lock();
146185
RegionIterator::<Chunk>::new(chunk_range.start, chunk_range.end)
186+
.filter(|c| self.get(*c).is_some_and(|state| state.is_allocated()))
147187
}
148188

149189
/// Helper function to create per-chunk processing work packets for each allocated chunks.
@@ -152,18 +192,9 @@ impl ChunkMap {
152192
func: impl Fn(Chunk) -> Box<dyn GCWork<VM>>,
153193
) -> Vec<Box<dyn GCWork<VM>>> {
154194
let mut work_packets: Vec<Box<dyn GCWork<VM>>> = vec![];
155-
for chunk in self
156-
.all_chunks()
157-
.filter(|c| self.get(*c).is_allocated())
158-
{
195+
for chunk in self.all_allocated_chunks() {
159196
work_packets.push(func(chunk));
160197
}
161198
work_packets
162199
}
163200
}
164-
165-
impl Default for ChunkMap {
166-
fn default() -> Self {
167-
Self::new()
168-
}
169-
}

src/util/metadata/side_metadata/global.rs

+5
Original file line numberDiff line numberDiff line change
@@ -1345,6 +1345,11 @@ impl SideMetadataContext {
13451345
}
13461346
}
13471347

1348+
// Any plan that uses the chunk map needs to reserve the chunk map table.
1349+
// As we use either the mark sweep or (non moving) immix as the non moving space,
1350+
// and both policies use the chunk map, we just add the chunk map table globally.
1351+
ret.push(crate::util::heap::chunk_map::ChunkMap::ALLOC_TABLE);
1352+
13481353
ret.extend_from_slice(specs);
13491354
ret
13501355
}

src/util/metadata/side_metadata/spec_defs.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,8 @@ define_side_metadata_specs!(
6060
MS_ACTIVE_CHUNK = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK),
6161
// Track the index in SFT map for a chunk (only used for SFT sparse chunk map)
6262
SFT_DENSE_CHUNK_MAP_INDEX = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK),
63+
// Mark chunks (global side metadata; added automatically for every plan in SideMetadataContext, so plans do not need to include it themselves)
64+
CHUNK_MARK = (global: true, log_num_of_bits: 3, log_bytes_in_region: crate::util::heap::chunk_map::Chunk::LOG_BYTES),
6365
);
6466

6567
// This defines all LOCAL side metadata used by mmtk-core.
@@ -75,8 +77,6 @@ define_side_metadata_specs!(
7577
IX_BLOCK_DEFRAG = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::immix::block::Block::LOG_BYTES),
7678
// Mark blocks by immix
7779
IX_BLOCK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::immix::block::Block::LOG_BYTES),
78-
// Mark chunks (any plan that uses the chunk map should include this spec in their local sidemetadata specs)
79-
CHUNK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::util::heap::chunk_map::Chunk::LOG_BYTES),
8080
// Mark blocks by (native mimalloc) marksweep
8181
MS_BLOCK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::marksweepspace::native_ms::Block::LOG_BYTES),
8282
// Next block in list for native mimalloc

src/util/object_enum.rs

+5-10
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,7 @@ use std::marker::PhantomData;
55
use crate::vm::VMBinding;
66

77
use super::{
8-
heap::{
9-
chunk_map::{ChunkMap, ChunkState},
10-
MonotonePageResource,
11-
},
8+
heap::{chunk_map::ChunkMap, MonotonePageResource},
129
linear_scan::Region,
1310
metadata::{side_metadata::spec_defs::VO_BIT, vo_bit},
1411
Address, ObjectReference,
@@ -84,12 +81,10 @@ pub(crate) fn enumerate_blocks_from_chunk_map<B>(
8481
) where
8582
B: BlockMayHaveObjects,
8683
{
87-
for chunk in chunk_map.all_chunks() {
88-
if chunk_map.get(chunk).is_allocated() {
89-
for block in chunk.iter_region::<B>() {
90-
if block.may_have_objects() {
91-
enumerator.visit_address_range(block.start(), block.end());
92-
}
84+
for chunk in chunk_map.all_allocated_chunks() {
85+
for block in chunk.iter_region::<B>() {
86+
if block.may_have_objects() {
87+
enumerator.visit_address_range(block.start(), block.end());
9388
}
9489
}
9590
}

src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs

+3-10
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,10 @@
11
// GITHUB-CI: MMTK_PLAN=all
22

3-
use lazy_static::lazy_static;
4-
53
use super::mock_test_prelude::*;
64
use crate::plan::AllocationSemantics;
75

86
#[test]
9-
pub fn allocate_alignment() {
7+
pub fn allocate_nonmoving() {
108
with_mockvm(
119
|| -> MockVM {
1210
MockVM {
@@ -20,13 +18,8 @@ pub fn allocate_alignment() {
2018
let mut fixture = MutatorFixture::create_with_heapsize(MB);
2119

2220
// Normal alloc
23-
let addr = memory_manager::alloc(
24-
&mut fixture.mutator,
25-
16,
26-
8,
27-
0,
28-
AllocationSemantics::Default,
29-
);
21+
let addr =
22+
memory_manager::alloc(&mut fixture.mutator, 16, 8, 0, AllocationSemantics::Default);
3023
assert!(!addr.is_zero());
3124

3225
// Non moving alloc

0 commit comments

Comments
 (0)