//! Note that this implementation does not take into account C++20's memory model revision to SC accesses
//! and fences introduced by P0668 (<https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0668r5.html>).
//! This implementation is not fully correct under the revised C++20 model and may generate behaviours C++20
- //! disallows.
+ //! disallows (<https://github.com/rust-lang/miri/issues/2301>).
//!
//! Rust follows the C++20 memory model (except for the Consume ordering and some operations not performable through C++'s
//! std::atomic<T> API). It is therefore possible for this implementation to generate behaviours never observable when the
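As context for the doc comment above, here is a minimal store-buffering litmus test (not part of this patch) showing the kind of weak behaviour this emulation can produce: with Relaxed orderings, both loads may observe 0, an outcome no sequentially consistent interleaving allows.

```rust
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::thread;

static X: AtomicUsize = AtomicUsize::new(0);
static Y: AtomicUsize = AtomicUsize::new(0);

fn main() {
    let t1 = thread::spawn(|| {
        X.store(1, Relaxed);
        Y.load(Relaxed) // may observe 0
    });
    let t2 = thread::spawn(|| {
        Y.store(1, Relaxed);
        X.load(Relaxed) // may observe 0
    });
    let (r1, r2) = (t1.join().unwrap(), t2.join().unwrap());
    // Under the C++20 model (and this emulation) r1 == 0 && r2 == 0 is a
    // permitted outcome, even though no interleaving of the two threads
    // under sequential consistency could produce it.
    println!("r1 = {r1}, r2 = {r2}");
}
```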
@@ -82,10 +82,12 @@ use rustc_const_eval::interpret::{
};
use rustc_data_structures::fx::FxHashMap;

- use crate::{AtomicReadOp, AtomicRwOp, AtomicWriteOp, Tag, VClock, VTimestamp, VectorIdx};
+ use crate::{
+     AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, Tag, ThreadManager, VClock, VTimestamp, VectorIdx,
+ };

use super::{
-     data_race::{GlobalState, ThreadClockSet},
+     data_race::{GlobalState as DataRaceState, ThreadClockSet},
    range_object_map::{AccessType, RangeObjectMap},
};

@@ -149,7 +151,7 @@ impl StoreBufferAlloc {
    /// before without data race, we can determine that the non-atomic access fully happens
    /// after all the prior atomic accesses so the location no longer needs to exhibit
    /// any weak memory behaviours until further atomic accesses.
-     pub fn memory_accessed(&self, range: AllocRange, global: &GlobalState) {
+     pub fn memory_accessed(&self, range: AllocRange, global: &DataRaceState) {
        if !global.ongoing_action_data_race_free() {
            let mut buffers = self.store_buffers.borrow_mut();
            let access_type = buffers.access_type(range);
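A sketch (not from this patch) of the situation the doc comment above describes: once a non-atomic access is race-free with every earlier atomic access, the location can stop exhibiting weak behaviour.

```rust
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::thread;

fn main() {
    let mut x = AtomicUsize::new(0);
    thread::scope(|s| {
        s.spawn(|| x.store(1, Relaxed));
    }); // the implicit join makes the access below race-free

    // A non-atomic access that happens-after all prior atomic accesses:
    // at this point memory_accessed() may discard the store buffer for
    // this location, since only the latest value can ever be observed.
    assert_eq!(*x.get_mut(), 1);
}
```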
@@ -236,17 +238,18 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
    }

    /// Reads from the last store in modification order
-     fn read_from_last_store(&self, global: &GlobalState) {
+     fn read_from_last_store(&self, global: &DataRaceState, thread_mgr: &ThreadManager<'_, '_>) {
        let store_elem = self.buffer.back();
        if let Some(store_elem) = store_elem {
-             let (index, clocks) = global.current_thread_state();
+             let (index, clocks) = global.current_thread_state(thread_mgr);
            store_elem.load_impl(index, &clocks);
        }
    }

    fn buffered_read(
        &self,
-         global: &GlobalState,
+         global: &DataRaceState,
+         thread_mgr: &ThreadManager<'_, '_>,
        is_seqcst: bool,
        rng: &mut (impl rand::Rng + ?Sized),
        validate: impl FnOnce() -> InterpResult<'tcx>,
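`read_from_last_store` in the hunk above reflects the C++20 guarantee that read-modify-write operations read the latest value in modification order, never a stale buffered one. A minimal illustration (hypothetical, not from this patch):

```rust
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

fn main() {
    let x = AtomicUsize::new(0);
    x.store(1, Relaxed);
    x.store(2, Relaxed);
    // An RMW must read the last store in modification order (here, 2);
    // a plain Relaxed load in another thread could legally still return 1.
    assert_eq!(x.fetch_add(1, Relaxed), 2);
    assert_eq!(x.load(Relaxed), 3);
}
```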
@@ -257,7 +260,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
        let store_elem = {
            // The `clocks` we got here must be dropped before calling validate_atomic_load
            // as the race detector will update it
-             let (.., clocks) = global.current_thread_state();
+             let (.., clocks) = global.current_thread_state(thread_mgr);
            // Load from a valid entry in the store buffer
            self.fetch_store(is_seqcst, &clocks, &mut *rng)
        };
@@ -268,18 +271,19 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
        // requires access to ThreadClockSet.clock, which is updated by the race detector
        validate()?;

-         let (index, clocks) = global.current_thread_state();
+         let (index, clocks) = global.current_thread_state(thread_mgr);
        let loaded = store_elem.load_impl(index, &clocks);
        Ok(loaded)
    }

    fn buffered_write(
        &mut self,
        val: ScalarMaybeUninit<Tag>,
-         global: &GlobalState,
+         global: &DataRaceState,
+         thread_mgr: &ThreadManager<'_, '_>,
        is_seqcst: bool,
    ) -> InterpResult<'tcx> {
-         let (index, clocks) = global.current_thread_state();
+         let (index, clocks) = global.current_thread_state(thread_mgr);

        self.store_impl(val, index, &clocks.clock, is_seqcst);
        Ok(())
@@ -428,8 +432,11 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
        {
            let range = alloc_range(base_offset, place.layout.size);
            if alloc_buffers.is_overlapping(range)
-                 && !alloc_clocks
-                     .race_free_with_atomic(range, this.machine.data_race.as_ref().unwrap())
+                 && !alloc_clocks.race_free_with_atomic(
+                     range,
+                     this.machine.data_race.as_ref().unwrap(),
+                     &this.machine.threads,
+                 )
            {
                throw_unsup_format!(
                    "racy imperfectly overlapping atomic access is not possible in the C++20 memory model, and not supported by Miri's weak memory emulation"
@@ -443,41 +450,41 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
        &mut self,
        new_val: ScalarMaybeUninit<Tag>,
        place: &MPlaceTy<'tcx, Tag>,
-         atomic: AtomicRwOp,
+         atomic: AtomicRwOrd,
        init: ScalarMaybeUninit<Tag>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
        if let (
            crate::AllocExtra { weak_memory: Some(alloc_buffers), .. },
-             crate::Evaluator { data_race: Some(global), .. },
+             crate::Evaluator { data_race: Some(global), threads, .. },
        ) = this.get_alloc_extra_mut(alloc_id)?
        {
-             if atomic == AtomicRwOp::SeqCst {
-                 global.sc_read();
-                 global.sc_write();
+             if atomic == AtomicRwOrd::SeqCst {
+                 global.sc_read(threads);
+                 global.sc_write(threads);
            }
            let range = alloc_range(base_offset, place.layout.size);
            let buffer = alloc_buffers.get_or_create_store_buffer_mut(range, init)?;
-             buffer.read_from_last_store(global);
-             buffer.buffered_write(new_val, global, atomic == AtomicRwOp::SeqCst)?;
+             buffer.read_from_last_store(global, threads);
+             buffer.buffered_write(new_val, global, threads, atomic == AtomicRwOrd::SeqCst)?;
        }
        Ok(())
    }

    fn buffered_atomic_read(
        &self,
        place: &MPlaceTy<'tcx, Tag>,
-         atomic: AtomicReadOp,
+         atomic: AtomicReadOrd,
        latest_in_mo: ScalarMaybeUninit<Tag>,
        validate: impl FnOnce() -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        let this = self.eval_context_ref();
        if let Some(global) = &this.machine.data_race {
            let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
            if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
-                 if atomic == AtomicReadOp::SeqCst {
-                     global.sc_read();
+                 if atomic == AtomicReadOrd::SeqCst {
+                     global.sc_read(&this.machine.threads);
                }
                let mut rng = this.machine.rng.borrow_mut();
                let buffer = alloc_buffers.get_or_create_store_buffer(
@@ -486,7 +493,8 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
                )?;
                let loaded = buffer.buffered_read(
                    global,
-                     atomic == AtomicReadOp::SeqCst,
+                     &this.machine.threads,
+                     atomic == AtomicReadOrd::SeqCst,
                    &mut *rng,
                    validate,
                )?;
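For contrast with the Relaxed litmus test earlier: when every access is SeqCst (exercising the `sc_read`/`sc_write` bookkeeping threaded through above), the weak outcome disappears. A hypothetical sketch:

```rust
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use std::thread;

static X: AtomicUsize = AtomicUsize::new(0);
static Y: AtomicUsize = AtomicUsize::new(0);

fn main() {
    let t1 = thread::spawn(|| {
        X.store(1, SeqCst);
        Y.load(SeqCst)
    });
    let t2 = thread::spawn(|| {
        Y.store(1, SeqCst);
        X.load(SeqCst)
    });
    let (r1, r2) = (t1.join().unwrap(), t2.join().unwrap());
    // SeqCst accesses belong to a single total order, so at least one
    // load must observe the other thread's store.
    assert!(r1 + r2 >= 1);
}
```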
@@ -504,18 +512,18 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
        &mut self,
        val: ScalarMaybeUninit<Tag>,
        dest: &MPlaceTy<'tcx, Tag>,
-         atomic: AtomicWriteOp,
+         atomic: AtomicWriteOrd,
        init: ScalarMaybeUninit<Tag>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(dest.ptr)?;
        if let (
            crate::AllocExtra { weak_memory: Some(alloc_buffers), .. },
-             crate::Evaluator { data_race: Some(global), .. },
+             crate::Evaluator { data_race: Some(global), threads, .. },
        ) = this.get_alloc_extra_mut(alloc_id)?
        {
-             if atomic == AtomicWriteOp::SeqCst {
-                 global.sc_write();
+             if atomic == AtomicWriteOrd::SeqCst {
+                 global.sc_write(threads);
            }

            // UGLY HACK: in write_scalar_atomic() we don't know the value before our write,
@@ -535,7 +543,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
                buffer.buffer.pop_front();
            }

-             buffer.buffered_write(val, global, atomic == AtomicWriteOp::SeqCst)?;
+             buffer.buffered_write(val, global, threads, atomic == AtomicWriteOrd::SeqCst)?;
        }

        // Caller should've written to dest with the vanilla scalar write, we do nothing here
@@ -548,21 +556,21 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
    fn perform_read_on_buffered_latest(
        &self,
        place: &MPlaceTy<'tcx, Tag>,
-         atomic: AtomicReadOp,
+         atomic: AtomicReadOrd,
        init: ScalarMaybeUninit<Tag>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();

        if let Some(global) = &this.machine.data_race {
-             if atomic == AtomicReadOp::SeqCst {
-                 global.sc_read();
+             if atomic == AtomicReadOrd::SeqCst {
+                 global.sc_read(&this.machine.threads);
            }
            let size = place.layout.size;
            let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
            if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
                let buffer = alloc_buffers
                    .get_or_create_store_buffer(alloc_range(base_offset, size), init)?;
-                 buffer.read_from_last_store(global);
+                 buffer.read_from_last_store(global, &this.machine.threads);
            }
        }
        Ok(())