Commit 9cb5855

Rollup merge of rust-lang#103456 - scottmcm:fix-unchecked-shifts, r=m-ou-se
`unchecked_{shl|shr}` should use `u32` as the RHS

The other shift methods, such as https://doc.rust-lang.org/nightly/std/primitive.u64.html#method.checked_shr and https://doc.rust-lang.org/nightly/std/primitive.i16.html#method.wrapping_shl, use `u32` for the shift amount. That's consistent with other APIs, like `count_ones`, which also always use `u32` for a bit count, regardless of the size of the type.

This PR changes `unchecked_shl` and `unchecked_shr` to also use `u32` for the shift amount (rather than `Self`).

cc rust-lang#85122, the `unchecked_math` tracking issue
2 parents 81a75dd + 3b16c04 commit 9cb5855
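
As a rough caller-side illustration of the signature change (not part of the commit; the helper name below is hypothetical), nightly code using the `unchecked_math` feature now passes the shift amount as `u32` regardless of how wide the shifted integer is:

    #![feature(unchecked_math)]

    /// Hypothetical helper: shift `x` left by `amount` bits with no checked or masked path.
    /// SAFETY requirement on callers: `amount` must be less than 64 (u64::BITS),
    /// otherwise the behaviour is undefined.
    pub unsafe fn shl_exact(x: u64, amount: u32) -> u64 {
        // Before this change the RHS was `Self` (here `u64`); after it, the RHS is `u32`,
        // matching `checked_shl` and `wrapping_shl`.
        // SAFETY: forwarded to our caller, who must guarantee `amount < 64`.
        unsafe { x.unchecked_shl(amount) }
    }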

5 files changed: +87 -12 lines changed

Diff for: library/core/src/lib.rs (+1)

@@ -131,6 +131,7 @@
 #![feature(const_pin)]
 #![feature(const_ptr_sub_ptr)]
 #![feature(const_replace)]
+#![feature(const_result_drop)]
 #![feature(const_ptr_as_ref)]
 #![feature(const_ptr_is_null)]
 #![feature(const_ptr_read)]

Diff for: library/core/src/num/int_macros.rs (+10 -6)

@@ -761,10 +761,11 @@ macro_rules! int_impl {
         #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
         #[inline(always)]
         #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-        pub const unsafe fn unchecked_shl(self, rhs: Self) -> Self {
+        pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
             // SAFETY: the caller must uphold the safety contract for
             // `unchecked_shl`.
-            unsafe { intrinsics::unchecked_shl(self, rhs) }
+            // Any legal shift amount is losslessly representable in the self type.
+            unsafe { intrinsics::unchecked_shl(self, rhs.try_into().ok().unwrap_unchecked()) }
         }

         /// Checked shift right. Computes `self >> rhs`, returning `None` if `rhs` is
@@ -808,10 +809,11 @@ macro_rules! int_impl {
         #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
         #[inline(always)]
         #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-        pub const unsafe fn unchecked_shr(self, rhs: Self) -> Self {
+        pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
             // SAFETY: the caller must uphold the safety contract for
             // `unchecked_shr`.
-            unsafe { intrinsics::unchecked_shr(self, rhs) }
+            // Any legal shift amount is losslessly representable in the self type.
+            unsafe { intrinsics::unchecked_shr(self, rhs.try_into().ok().unwrap_unchecked()) }
         }

         /// Checked absolute value. Computes `self.abs()`, returning `None` if
@@ -1358,11 +1360,12 @@ macro_rules! int_impl {
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
+        #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
         pub const fn wrapping_shl(self, rhs: u32) -> Self {
             // SAFETY: the masking by the bitsize of the type ensures that we do not shift
             // out of bounds
             unsafe {
-                intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+                self.unchecked_shl(rhs & ($BITS - 1))
             }
         }

@@ -1387,11 +1390,12 @@ macro_rules! int_impl {
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
+        #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
         pub const fn wrapping_shr(self, rhs: u32) -> Self {
             // SAFETY: the masking by the bitsize of the type ensures that we do not shift
             // out of bounds
             unsafe {
-                intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+                self.unchecked_shr(rhs & ($BITS - 1))
             }
         }
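
A standalone sketch (illustrative only; `shl_i16_unchecked` is a hypothetical free function, not part of the commit) of why the `try_into().ok().unwrap_unchecked()` pattern above is sound: any shift amount the caller is allowed to pass is already smaller than the bit width, so the narrowing conversion can never fail and skipping its error branch adds no new undefined behaviour.

    /// Hypothetical demonstration of the conversion pattern used in the diff.
    /// SAFETY requirement on callers: `rhs` must be less than 16 (i16::BITS).
    pub unsafe fn shl_i16_unchecked(lhs: i16, rhs: u32) -> i16 {
        // `rhs < 16` means the u32 -> i16 conversion always succeeds,
        // so the `None` branch is unreachable under the caller's contract.
        let rhs_small: i16 = unsafe { rhs.try_into().ok().unwrap_unchecked() };
        lhs << rhs_small
    }
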
Diff for: library/core/src/num/mod.rs (+1)

@@ -3,6 +3,7 @@
 #![stable(feature = "rust1", since = "1.0.0")]

 use crate::ascii;
+use crate::convert::TryInto;
 use crate::error::Error;
 use crate::intrinsics;
 use crate::mem;

Diff for: library/core/src/num/uint_macros.rs (+10 -6)

@@ -908,10 +908,11 @@ macro_rules! uint_impl {
         #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
         #[inline(always)]
         #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-        pub const unsafe fn unchecked_shl(self, rhs: Self) -> Self {
+        pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
             // SAFETY: the caller must uphold the safety contract for
             // `unchecked_shl`.
-            unsafe { intrinsics::unchecked_shl(self, rhs) }
+            // Any legal shift amount is losslessly representable in the self type.
+            unsafe { intrinsics::unchecked_shl(self, rhs.try_into().ok().unwrap_unchecked()) }
         }

         /// Checked shift right. Computes `self >> rhs`, returning `None`
@@ -955,10 +956,11 @@ macro_rules! uint_impl {
         #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
         #[inline(always)]
         #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-        pub const unsafe fn unchecked_shr(self, rhs: Self) -> Self {
+        pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
             // SAFETY: the caller must uphold the safety contract for
             // `unchecked_shr`.
-            unsafe { intrinsics::unchecked_shr(self, rhs) }
+            // Any legal shift amount is losslessly representable in the self type.
+            unsafe { intrinsics::unchecked_shr(self, rhs.try_into().ok().unwrap_unchecked()) }
         }

         /// Checked exponentiation. Computes `self.pow(exp)`, returning `None` if
@@ -1374,11 +1376,12 @@ macro_rules! uint_impl {
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
+        #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
         pub const fn wrapping_shl(self, rhs: u32) -> Self {
             // SAFETY: the masking by the bitsize of the type ensures that we do not shift
             // out of bounds
             unsafe {
-                intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+                self.unchecked_shl(rhs & ($BITS - 1))
             }
         }

@@ -1406,11 +1409,12 @@ macro_rules! uint_impl {
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
+        #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
         pub const fn wrapping_shr(self, rhs: u32) -> Self {
             // SAFETY: the masking by the bitsize of the type ensures that we do not shift
             // out of bounds
             unsafe {
-                intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+                self.unchecked_shr(rhs & ($BITS - 1))
             }
         }
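
For context (an illustrative check, not from the commit): the `rhs & ($BITS - 1)` masking that `wrapping_shl`/`wrapping_shr` now forward to the unchecked calls is exactly the documented "shift amount taken modulo the bit width" behaviour, so the forwarded value always satisfies the unchecked functions' precondition. For `u8`, for example:

    fn main() {
        // $BITS - 1 is 7 for u8, so the effective shift amount is rhs & 7.
        assert_eq!(1u8.wrapping_shl(7), 0x80); // in-range shifts behave normally
        assert_eq!(1u8.wrapping_shl(8), 1);    // 8 & 7 == 0: shift by 0
        assert_eq!(1u8.wrapping_shl(9), 2);    // 9 & 7 == 1: shift by 1
    }
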
Diff for: src/test/codegen/unchecked_shifts.rs (+65, new file)

@@ -0,0 +1,65 @@
+// compile-flags: -O
+// min-llvm-version: 15.0 (LLVM 13 in CI does this differently from submodule LLVM)
+
+#![crate_type = "lib"]
+#![feature(unchecked_math)]
+
+// CHECK-LABEL: @unchecked_shl_unsigned_same
+#[no_mangle]
+pub unsafe fn unchecked_shl_unsigned_same(a: u32, b: u32) -> u32 {
+    // CHECK-NOT: and i32
+    // CHECK: shl i32 %a, %b
+    // CHECK-NOT: and i32
+    a.unchecked_shl(b)
+}
+
+// CHECK-LABEL: @unchecked_shl_unsigned_smaller
+#[no_mangle]
+pub unsafe fn unchecked_shl_unsigned_smaller(a: u16, b: u32) -> u16 {
+    // This uses -DAG to avoid failing on irrelevant reorderings,
+    // like emitting the truncation earlier.
+
+    // CHECK-DAG: %[[INRANGE:.+]] = icmp ult i32 %b, 65536
+    // CHECK-DAG: tail call void @llvm.assume(i1 %[[INRANGE]])
+    // CHECK-DAG: %[[TRUNC:.+]] = trunc i32 %b to i16
+    // CHECK-DAG: shl i16 %a, %[[TRUNC]]
+    a.unchecked_shl(b)
+}
+
+// CHECK-LABEL: @unchecked_shl_unsigned_bigger
+#[no_mangle]
+pub unsafe fn unchecked_shl_unsigned_bigger(a: u64, b: u32) -> u64 {
+    // CHECK: %[[EXT:.+]] = zext i32 %b to i64
+    // CHECK: shl i64 %a, %[[EXT]]
+    a.unchecked_shl(b)
+}
+
+// CHECK-LABEL: @unchecked_shr_signed_same
+#[no_mangle]
+pub unsafe fn unchecked_shr_signed_same(a: i32, b: u32) -> i32 {
+    // CHECK-NOT: and i32
+    // CHECK: ashr i32 %a, %b
+    // CHECK-NOT: and i32
+    a.unchecked_shr(b)
+}
+
+// CHECK-LABEL: @unchecked_shr_signed_smaller
+#[no_mangle]
+pub unsafe fn unchecked_shr_signed_smaller(a: i16, b: u32) -> i16 {
+    // This uses -DAG to avoid failing on irrelevant reorderings,
+    // like emitting the truncation earlier.
+
+    // CHECK-DAG: %[[INRANGE:.+]] = icmp ult i32 %b, 32768
+    // CHECK-DAG: tail call void @llvm.assume(i1 %[[INRANGE]])
+    // CHECK-DAG: %[[TRUNC:.+]] = trunc i32 %b to i16
+    // CHECK-DAG: ashr i16 %a, %[[TRUNC]]
+    a.unchecked_shr(b)
+}
+
+// CHECK-LABEL: @unchecked_shr_signed_bigger
+#[no_mangle]
+pub unsafe fn unchecked_shr_signed_bigger(a: i64, b: u32) -> i64 {
+    // CHECK: %[[EXT:.+]] = zext i32 %b to i64
+    // CHECK: ashr i64 %a, %[[EXT]]
+    a.unchecked_shr(b)
+}