Skip to content

Commit d118a94

Browse files
committed
Auto merge of rust-lang#2302 - RalfJung:datarace, r=RalfJung
avoid copying thread manager state in data race detector. When doing rust-lang/miri#2047, I did not realize that there is some redundant state here that we can now remove from the data race detector. Also, this removes the vector clocks from the data race errors, since those don't really help diagnose the problem.
2 parents 70b97a5 + d09db16 commit d118a94

File tree

61 files changed

+431
-419
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

61 files changed

+431
-419
lines changed

src/concurrency/data_race.rs

Lines changed: 129 additions & 156 deletions
Large diffs are not rendered by default.

src/concurrency/weak_memory.rs

Lines changed: 41 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
//! Note that this implementation does not take into account of C++20's memory model revision to SC accesses
1010
//! and fences introduced by P0668 (<https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0668r5.html>).
1111
//! This implementation is not fully correct under the revised C++20 model and may generate behaviours C++20
12-
//! disallows.
12+
//! disallows (<https://github.com/rust-lang/miri/issues/2301>).
1313
//!
1414
//! Rust follows the C++20 memory model (except for the Consume ordering and some operations not performable through C++'s
1515
//! std::atomic<T> API). It is therefore possible for this implementation to generate behaviours never observable when the
@@ -82,10 +82,12 @@ use rustc_const_eval::interpret::{
8282
};
8383
use rustc_data_structures::fx::FxHashMap;
8484

85-
use crate::{AtomicReadOp, AtomicRwOp, AtomicWriteOp, Tag, VClock, VTimestamp, VectorIdx};
85+
use crate::{
86+
AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, Tag, ThreadManager, VClock, VTimestamp, VectorIdx,
87+
};
8688

8789
use super::{
88-
data_race::{GlobalState, ThreadClockSet},
90+
data_race::{GlobalState as DataRaceState, ThreadClockSet},
8991
range_object_map::{AccessType, RangeObjectMap},
9092
};
9193

@@ -149,7 +151,7 @@ impl StoreBufferAlloc {
149151
/// before without data race, we can determine that the non-atomic access fully happens
150152
/// after all the prior atomic accesses so the location no longer needs to exhibit
151153
/// any weak memory behaviours until further atomic accesses.
152-
pub fn memory_accessed(&self, range: AllocRange, global: &GlobalState) {
154+
pub fn memory_accessed(&self, range: AllocRange, global: &DataRaceState) {
153155
if !global.ongoing_action_data_race_free() {
154156
let mut buffers = self.store_buffers.borrow_mut();
155157
let access_type = buffers.access_type(range);
@@ -236,17 +238,18 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
236238
}
237239

238240
/// Reads from the last store in modification order
239-
fn read_from_last_store(&self, global: &GlobalState) {
241+
fn read_from_last_store(&self, global: &DataRaceState, thread_mgr: &ThreadManager<'_, '_>) {
240242
let store_elem = self.buffer.back();
241243
if let Some(store_elem) = store_elem {
242-
let (index, clocks) = global.current_thread_state();
244+
let (index, clocks) = global.current_thread_state(thread_mgr);
243245
store_elem.load_impl(index, &clocks);
244246
}
245247
}
246248

247249
fn buffered_read(
248250
&self,
249-
global: &GlobalState,
251+
global: &DataRaceState,
252+
thread_mgr: &ThreadManager<'_, '_>,
250253
is_seqcst: bool,
251254
rng: &mut (impl rand::Rng + ?Sized),
252255
validate: impl FnOnce() -> InterpResult<'tcx>,
@@ -257,7 +260,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
257260
let store_elem = {
258261
// The `clocks` we got here must be dropped before calling validate_atomic_load
259262
// as the race detector will update it
260-
let (.., clocks) = global.current_thread_state();
263+
let (.., clocks) = global.current_thread_state(thread_mgr);
261264
// Load from a valid entry in the store buffer
262265
self.fetch_store(is_seqcst, &clocks, &mut *rng)
263266
};
@@ -268,18 +271,19 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
268271
// requires access to ThreadClockSet.clock, which is updated by the race detector
269272
validate()?;
270273

271-
let (index, clocks) = global.current_thread_state();
274+
let (index, clocks) = global.current_thread_state(thread_mgr);
272275
let loaded = store_elem.load_impl(index, &clocks);
273276
Ok(loaded)
274277
}
275278

276279
fn buffered_write(
277280
&mut self,
278281
val: ScalarMaybeUninit<Tag>,
279-
global: &GlobalState,
282+
global: &DataRaceState,
283+
thread_mgr: &ThreadManager<'_, '_>,
280284
is_seqcst: bool,
281285
) -> InterpResult<'tcx> {
282-
let (index, clocks) = global.current_thread_state();
286+
let (index, clocks) = global.current_thread_state(thread_mgr);
283287

284288
self.store_impl(val, index, &clocks.clock, is_seqcst);
285289
Ok(())
@@ -428,8 +432,11 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
428432
{
429433
let range = alloc_range(base_offset, place.layout.size);
430434
if alloc_buffers.is_overlapping(range)
431-
&& !alloc_clocks
432-
.race_free_with_atomic(range, this.machine.data_race.as_ref().unwrap())
435+
&& !alloc_clocks.race_free_with_atomic(
436+
range,
437+
this.machine.data_race.as_ref().unwrap(),
438+
&this.machine.threads,
439+
)
433440
{
434441
throw_unsup_format!(
435442
"racy imperfectly overlapping atomic access is not possible in the C++20 memory model, and not supported by Miri's weak memory emulation"
@@ -443,41 +450,41 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
443450
&mut self,
444451
new_val: ScalarMaybeUninit<Tag>,
445452
place: &MPlaceTy<'tcx, Tag>,
446-
atomic: AtomicRwOp,
453+
atomic: AtomicRwOrd,
447454
init: ScalarMaybeUninit<Tag>,
448455
) -> InterpResult<'tcx> {
449456
let this = self.eval_context_mut();
450457
let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
451458
if let (
452459
crate::AllocExtra { weak_memory: Some(alloc_buffers), .. },
453-
crate::Evaluator { data_race: Some(global), .. },
460+
crate::Evaluator { data_race: Some(global), threads, .. },
454461
) = this.get_alloc_extra_mut(alloc_id)?
455462
{
456-
if atomic == AtomicRwOp::SeqCst {
457-
global.sc_read();
458-
global.sc_write();
463+
if atomic == AtomicRwOrd::SeqCst {
464+
global.sc_read(threads);
465+
global.sc_write(threads);
459466
}
460467
let range = alloc_range(base_offset, place.layout.size);
461468
let buffer = alloc_buffers.get_or_create_store_buffer_mut(range, init)?;
462-
buffer.read_from_last_store(global);
463-
buffer.buffered_write(new_val, global, atomic == AtomicRwOp::SeqCst)?;
469+
buffer.read_from_last_store(global, threads);
470+
buffer.buffered_write(new_val, global, threads, atomic == AtomicRwOrd::SeqCst)?;
464471
}
465472
Ok(())
466473
}
467474

468475
fn buffered_atomic_read(
469476
&self,
470477
place: &MPlaceTy<'tcx, Tag>,
471-
atomic: AtomicReadOp,
478+
atomic: AtomicReadOrd,
472479
latest_in_mo: ScalarMaybeUninit<Tag>,
473480
validate: impl FnOnce() -> InterpResult<'tcx>,
474481
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
475482
let this = self.eval_context_ref();
476483
if let Some(global) = &this.machine.data_race {
477484
let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
478485
if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
479-
if atomic == AtomicReadOp::SeqCst {
480-
global.sc_read();
486+
if atomic == AtomicReadOrd::SeqCst {
487+
global.sc_read(&this.machine.threads);
481488
}
482489
let mut rng = this.machine.rng.borrow_mut();
483490
let buffer = alloc_buffers.get_or_create_store_buffer(
@@ -486,7 +493,8 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
486493
)?;
487494
let loaded = buffer.buffered_read(
488495
global,
489-
atomic == AtomicReadOp::SeqCst,
496+
&this.machine.threads,
497+
atomic == AtomicReadOrd::SeqCst,
490498
&mut *rng,
491499
validate,
492500
)?;
@@ -504,18 +512,18 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
504512
&mut self,
505513
val: ScalarMaybeUninit<Tag>,
506514
dest: &MPlaceTy<'tcx, Tag>,
507-
atomic: AtomicWriteOp,
515+
atomic: AtomicWriteOrd,
508516
init: ScalarMaybeUninit<Tag>,
509517
) -> InterpResult<'tcx> {
510518
let this = self.eval_context_mut();
511519
let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(dest.ptr)?;
512520
if let (
513521
crate::AllocExtra { weak_memory: Some(alloc_buffers), .. },
514-
crate::Evaluator { data_race: Some(global), .. },
522+
crate::Evaluator { data_race: Some(global), threads, .. },
515523
) = this.get_alloc_extra_mut(alloc_id)?
516524
{
517-
if atomic == AtomicWriteOp::SeqCst {
518-
global.sc_write();
525+
if atomic == AtomicWriteOrd::SeqCst {
526+
global.sc_write(threads);
519527
}
520528

521529
// UGLY HACK: in write_scalar_atomic() we don't know the value before our write,
@@ -535,7 +543,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
535543
buffer.buffer.pop_front();
536544
}
537545

538-
buffer.buffered_write(val, global, atomic == AtomicWriteOp::SeqCst)?;
546+
buffer.buffered_write(val, global, threads, atomic == AtomicWriteOrd::SeqCst)?;
539547
}
540548

541549
// Caller should've written to dest with the vanilla scalar write, we do nothing here
@@ -548,21 +556,21 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
548556
fn perform_read_on_buffered_latest(
549557
&self,
550558
place: &MPlaceTy<'tcx, Tag>,
551-
atomic: AtomicReadOp,
559+
atomic: AtomicReadOrd,
552560
init: ScalarMaybeUninit<Tag>,
553561
) -> InterpResult<'tcx> {
554562
let this = self.eval_context_ref();
555563

556564
if let Some(global) = &this.machine.data_race {
557-
if atomic == AtomicReadOp::SeqCst {
558-
global.sc_read();
565+
if atomic == AtomicReadOrd::SeqCst {
566+
global.sc_read(&this.machine.threads);
559567
}
560568
let size = place.layout.size;
561569
let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
562570
if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
563571
let buffer = alloc_buffers
564572
.get_or_create_store_buffer(alloc_range(base_offset, size), init)?;
565-
buffer.read_from_last_store(global);
573+
buffer.read_from_last_store(global, &this.machine.threads);
566574
}
567575
}
568576
Ok(())

src/lib.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ pub use crate::shims::tls::{EvalContextExt as _, TlsData};
7070
pub use crate::shims::EvalContextExt as _;
7171

7272
pub use crate::concurrency::data_race::{
73-
AtomicFenceOp, AtomicReadOp, AtomicRwOp, AtomicWriteOp,
73+
AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd,
7474
EvalContextExt as DataRaceEvalContextExt,
7575
};
7676
pub use crate::diagnostics::{

src/machine.rs

Lines changed: 24 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -647,7 +647,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
647647
None
648648
};
649649
let race_alloc = if let Some(data_race) = &ecx.machine.data_race {
650-
Some(data_race::AllocExtra::new_allocation(data_race, alloc.size(), kind))
650+
Some(data_race::AllocExtra::new_allocation(
651+
data_race,
652+
&ecx.machine.threads,
653+
alloc.size(),
654+
kind,
655+
))
651656
} else {
652657
None
653658
};
@@ -756,7 +761,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
756761
range: AllocRange,
757762
) -> InterpResult<'tcx> {
758763
if let Some(data_race) = &alloc_extra.data_race {
759-
data_race.read(alloc_id, range, machine.data_race.as_ref().unwrap())?;
764+
data_race.read(
765+
alloc_id,
766+
range,
767+
machine.data_race.as_ref().unwrap(),
768+
&machine.threads,
769+
)?;
760770
}
761771
if let Some(stacked_borrows) = &alloc_extra.stacked_borrows {
762772
stacked_borrows.borrow_mut().memory_read(
@@ -782,7 +792,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
782792
range: AllocRange,
783793
) -> InterpResult<'tcx> {
784794
if let Some(data_race) = &mut alloc_extra.data_race {
785-
data_race.write(alloc_id, range, machine.data_race.as_mut().unwrap())?;
795+
data_race.write(
796+
alloc_id,
797+
range,
798+
machine.data_race.as_mut().unwrap(),
799+
&machine.threads,
800+
)?;
786801
}
787802
if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
788803
stacked_borrows.get_mut().memory_written(
@@ -811,7 +826,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
811826
register_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
812827
}
813828
if let Some(data_race) = &mut alloc_extra.data_race {
814-
data_race.deallocate(alloc_id, range, machine.data_race.as_mut().unwrap())?;
829+
data_race.deallocate(
830+
alloc_id,
831+
range,
832+
machine.data_race.as_mut().unwrap(),
833+
&machine.threads,
834+
)?;
815835
}
816836
if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
817837
stacked_borrows.get_mut().memory_deallocated(

0 commit comments

Comments
 (0)