1
+ use std:: iter;
2
+
1
3
use log:: trace;
2
4
5
+ use rand:: { seq:: IteratorRandom , Rng } ;
6
+ use rustc_apfloat:: { Float , FloatConvert } ;
3
7
use rustc_middle:: mir;
4
8
use rustc_target:: abi:: Size ;
5
9
6
10
use crate :: * ;
7
11
8
- pub trait EvalContextExt < ' tcx > {
9
- fn binary_ptr_op (
10
- & self ,
11
- bin_op : mir:: BinOp ,
12
- left : & ImmTy < ' tcx , Provenance > ,
13
- right : & ImmTy < ' tcx , Provenance > ,
14
- ) -> InterpResult < ' tcx , ( ImmTy < ' tcx , Provenance > , bool ) > ;
15
- }
16
-
17
- impl < ' mir , ' tcx > EvalContextExt < ' tcx > for super :: MiriInterpCx < ' mir , ' tcx > {
12
+ impl < ' mir , ' tcx : ' mir > EvalContextExt < ' mir , ' tcx > for crate :: MiriInterpCx < ' mir , ' tcx > { }
13
+ pub trait EvalContextExt < ' mir , ' tcx : ' mir > : crate :: MiriInterpCxExt < ' mir , ' tcx > {
18
14
fn binary_ptr_op (
19
15
& self ,
20
16
bin_op : mir:: BinOp ,
@@ -23,12 +19,13 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriInterpCx<'mir, 'tcx> {
23
19
) -> InterpResult < ' tcx , ( ImmTy < ' tcx , Provenance > , bool ) > {
24
20
use rustc_middle:: mir:: BinOp :: * ;
25
21
22
+ let this = self . eval_context_ref ( ) ;
26
23
trace ! ( "ptr_op: {:?} {:?} {:?}" , * left, bin_op, * right) ;
27
24
28
25
Ok ( match bin_op {
29
26
Eq | Ne | Lt | Le | Gt | Ge => {
30
27
assert_eq ! ( left. layout. abi, right. layout. abi) ; // types can differ, e.g. fn ptrs with different `for`
31
- let size = self . pointer_size ( ) ;
28
+ let size = this . pointer_size ( ) ;
32
29
// Just compare the bits. ScalarPairs are compared lexicographically.
33
30
// We thus always compare pairs and simply fill scalars up with 0.
34
31
let left = match * * left {
@@ -50,34 +47,75 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriInterpCx<'mir, 'tcx> {
50
47
Ge => left >= right,
51
48
_ => bug ! ( ) ,
52
49
} ;
53
- ( ImmTy :: from_bool ( res, * self . tcx ) , false )
50
+ ( ImmTy :: from_bool ( res, * this . tcx ) , false )
54
51
}
55
52
56
53
// Some more operations are possible with atomics.
57
54
// The return value always has the provenance of the *left* operand.
58
55
Add | Sub | BitOr | BitAnd | BitXor => {
59
56
assert ! ( left. layout. ty. is_unsafe_ptr( ) ) ;
60
57
assert ! ( right. layout. ty. is_unsafe_ptr( ) ) ;
61
- let ptr = left. to_scalar ( ) . to_pointer ( self ) ?;
58
+ let ptr = left. to_scalar ( ) . to_pointer ( this ) ?;
62
59
// We do the actual operation with usize-typed scalars.
63
- let left = ImmTy :: from_uint ( ptr. addr ( ) . bytes ( ) , self . machine . layouts . usize ) ;
60
+ let left = ImmTy :: from_uint ( ptr. addr ( ) . bytes ( ) , this . machine . layouts . usize ) ;
64
61
let right = ImmTy :: from_uint (
65
- right. to_scalar ( ) . to_target_usize ( self ) ?,
66
- self . machine . layouts . usize ,
62
+ right. to_scalar ( ) . to_target_usize ( this ) ?,
63
+ this . machine . layouts . usize ,
67
64
) ;
68
- let ( result, overflowing) = self . overflowing_binary_op ( bin_op, & left, & right) ?;
65
+ let ( result, overflowing) = this . overflowing_binary_op ( bin_op, & left, & right) ?;
69
66
// Construct a new pointer with the provenance of `ptr` (the LHS).
70
67
let result_ptr = Pointer :: new (
71
68
ptr. provenance ,
72
- Size :: from_bytes ( result. to_scalar ( ) . to_target_usize ( self ) ?) ,
69
+ Size :: from_bytes ( result. to_scalar ( ) . to_target_usize ( this ) ?) ,
73
70
) ;
74
71
(
75
- ImmTy :: from_scalar ( Scalar :: from_maybe_pointer ( result_ptr, self ) , left. layout ) ,
72
+ ImmTy :: from_scalar ( Scalar :: from_maybe_pointer ( result_ptr, this ) , left. layout ) ,
76
73
overflowing,
77
74
)
78
75
}
79
76
80
- _ => span_bug ! ( self . cur_span( ) , "Invalid operator on pointers: {:?}" , bin_op) ,
77
+ _ => span_bug ! ( this . cur_span( ) , "Invalid operator on pointers: {:?}" , bin_op) ,
81
78
} )
82
79
}
80
+
81
+ fn generate_nan < F1 : Float + FloatConvert < F2 > , F2 : Float > ( & self , inputs : & [ F1 ] ) -> F2 {
82
+ /// Make the given NaN a signaling NaN.
83
+ /// Returns `None` if this would not result in a NaN.
84
+ fn make_signaling < F : Float > ( f : F ) -> Option < F > {
85
+ // The quiet/signaling bit is the leftmost bit in the mantissa.
86
+ // That's position `PRECISION-1`, since `PRECISION` includes the fixed leading 1 bit,
87
+ // and then we subtract 1 more since this is 0-indexed.
88
+ let quiet_bit_mask = 1 << ( F :: PRECISION - 2 ) ;
89
+ // Unset the bit. Double-check that this wasn't the last bit set in the payload.
90
+ // (which would turn the NaN into an infinity).
91
+ let f = F :: from_bits ( f. to_bits ( ) & !quiet_bit_mask) ;
92
+ if f. is_nan ( ) { Some ( f) } else { None }
93
+ }
94
+
95
+ let this = self . eval_context_ref ( ) ;
96
+ let mut rand = this. machine . rng . borrow_mut ( ) ;
97
+ // Assemble an iterator of possible NaNs: preferred, quieting propagation, unchanged propagation.
98
+ // On some targets there are more possibilities; for now we just generate those options that
99
+ // are possible everywhere.
100
+ let preferred_nan = F2 :: qnan ( Some ( 0 ) ) ;
101
+ let nans = iter:: once ( preferred_nan)
102
+ . chain ( inputs. iter ( ) . filter ( |f| f. is_nan ( ) ) . map ( |& f| {
103
+ // Regular apfloat cast is quieting.
104
+ f. convert ( & mut false ) . value
105
+ } ) )
106
+ . chain ( inputs. iter ( ) . filter ( |f| f. is_signaling ( ) ) . filter_map ( |& f| {
107
+ let f: F2 = f. convert ( & mut false ) . value ;
108
+ // We have to de-quiet this again for unchanged propagation.
109
+ make_signaling ( f)
110
+ } ) ) ;
111
+ // Pick one of the NaNs.
112
+ let nan = nans. choose ( & mut * rand) . unwrap ( ) ;
113
+ // Non-deterministically flip the sign.
114
+ if rand. gen ( ) {
115
+ // This will properly flip even for NaN.
116
+ -nan
117
+ } else {
118
+ nan
119
+ }
120
+ }
83
121
}
0 commit comments