@@ -1105,8 +1105,8 @@ fn generic_simd_intrinsic(
         let m_len = match in_ty.kind {
             // Note that this `.unwrap()` crashes for isize/usize, that's sort
             // of intentional as there's not currently a use case for that.
-            ty::Int(i) => i.bit_width().unwrap(),
-            ty::Uint(i) => i.bit_width().unwrap(),
+            ty::Int(i) => i.bit_width().unwrap() as u64,
+            ty::Uint(i) => i.bit_width().unwrap() as u64,
             _ => return_error!("`{}` is not an integral type", in_ty),
         };
         require_simd!(arg_tys[1], "argument");
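A minimal standalone sketch (not part of the patch) of the width lookup this hunk widens: `bit_width()` is assumed to return `Option<usize>`, with `None` for `isize`/`usize`, which is why the `.unwrap()` above deliberately panics for those types and why the result now needs an `as u64` cast. The helper name below is made up for illustration.

    // Illustrative only: models the mask-width lookup, not rustc's real types.
    fn mask_bit_len(bit_width: Option<usize>) -> u64 {
        // `None` stands in for isize/usize; the real code intentionally
        // panics on those because there is no use case for such masks.
        bit_width.unwrap() as u64
    }

    fn main() {
        assert_eq!(mask_bit_len(Some(8)), 8);   // a u8 mask covers 8 lanes
        assert_eq!(mask_bit_len(Some(32)), 32); // a u32 mask covers 32 lanes
    }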
@@ -1116,7 +1116,7 @@ fn generic_simd_intrinsic(
                  m_len, v_len
         );
         let i1 = bx.type_i1();
-        let i1xn = bx.type_vector(i1, m_len as u64);
+        let i1xn = bx.type_vector(i1, m_len);
         let m_i1s = bx.bitcast(args[0].immediate(), i1xn);
         return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
     }
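These two hunks sit in the `simd_select_bitmask` lowering: the integer mask is bitcast to an `<i1 x N>` vector and each mask bit then selects between the corresponding lanes of the two data vectors. Below is a hedged scalar model of that selection; the lane count, element type, and little-endian bit-to-lane mapping are illustrative assumptions, not taken from the patch.

    // Bit i of `mask` picks lane i from `if_true`, otherwise from `if_false`
    // (assuming a little-endian bit-to-lane mapping).
    fn select_bitmask(mask: u8, if_true: [i32; 8], if_false: [i32; 8]) -> [i32; 8] {
        let mut out = [0i32; 8];
        for lane in 0..8 {
            out[lane] = if (mask >> lane) & 1 == 1 { if_true[lane] } else { if_false[lane] };
        }
        out
    }

    fn main() {
        let t = [1; 8];
        let f = [0; 8];
        assert_eq!(select_bitmask(0b0000_0101, t, f), [1, 0, 1, 0, 0, 0, 0, 0]);
    }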
@@ -1160,7 +1160,7 @@ fn generic_simd_intrinsic(
     }
 
     if name.starts_with("simd_shuffle") {
-        let n: usize = name["simd_shuffle".len()..].parse().unwrap_or_else(|_|
+        let n: u64 = name["simd_shuffle".len()..].parse().unwrap_or_else(|_|
             span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?"));
 
         require_simd!(ret_ty, "return");
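`simd_shuffleN` carries its result lane count in the intrinsic name, and the hunk above only changes the type it is parsed into. A self-contained sketch of that suffix parsing (the helper name and the `Option`-based error handling are illustrative, not rustc's):

    // Extract N from a name like "simd_shuffle4"; None if the suffix is
    // missing or not a number.
    fn shuffle_lane_count(name: &str) -> Option<u64> {
        name.strip_prefix("simd_shuffle")?.parse().ok()
    }

    fn main() {
        assert_eq!(shuffle_lane_count("simd_shuffle8"), Some(8));
        assert_eq!(shuffle_lane_count("simd_shuffle"), None);
    }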
@@ -1175,7 +1175,7 @@ fn generic_simd_intrinsic(
                  in_elem, in_ty,
                  ret_ty, ret_ty.simd_type(tcx));
 
-        let total_len = in_len as u128 * 2;
+        let total_len = u128::from(in_len) * 2;
 
         let vector = args[2].immediate();
 
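`simd_shuffle` indices address the concatenation of the two input vectors, so the valid range is twice the input length, which is what `total_len` expresses above. A small sketch of that bounds rule (the helper and constants are illustrative):

    // An index is in bounds if it names a lane of either input vector,
    // i.e. it is below 2 * in_len.
    fn shuffle_index_in_bounds(idx: u128, in_len: u64) -> bool {
        idx < u128::from(in_len) * 2
    }

    fn main() {
        assert!(shuffle_index_in_bounds(7, 4));  // second input vector, lane 3
        assert!(!shuffle_index_in_bounds(8, 4)); // out of range for 4-lane inputs
    }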
@@ -1251,7 +1251,7 @@ fn generic_simd_intrinsic(
         // trailing bits.
         let expected_int_bits = in_len.max(8);
         match ret_ty.kind {
-            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
+            ty::Uint(i) if i.bit_width() == Some(expected_int_bits as usize) => (),
             _ => return_error!(
                 "bitmask `{}`, expected `u{}`",
                 ret_ty, expected_int_bits
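`simd_bitmask` is expected to return an unsigned integer with one bit per lane, but never narrower than `u8`, which is what `in_len.max(8)` encodes above. A tiny sketch of that expectation (the helper name is illustrative):

    // One mask bit per lane, rounded up to at least a u8.
    fn expected_bitmask_bits(in_len: u64) -> u64 {
        in_len.max(8)
    }

    fn main() {
        assert_eq!(expected_bitmask_bits(4), 8);   // a <4 x T> mask must be u8
        assert_eq!(expected_bitmask_bits(16), 16); // a <16 x T> mask must be u16
    }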
@@ -1276,7 +1276,8 @@ fn generic_simd_intrinsic(
 
         // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
         let shift_indices = vec![
-            bx.cx.const_int(bx.type_ix(in_elem_bitwidth as _), (in_elem_bitwidth - 1) as _); in_len
+            bx.cx.const_int(bx.type_ix(in_elem_bitwidth as _), (in_elem_bitwidth - 1) as _);
+            in_len as _
         ];
         let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
         // Truncate vector to an <i1 x N>
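The `vec!` above builds one shift amount per lane, each equal to `in_elem_bitwidth - 1`, so the logical shift right moves every lane's most significant bit into bit 0 before the vector is truncated to `<i1 x N>`. A scalar model of that per-lane step, using a 32-bit lane purely as an example:

    // A logical shift right by (width - 1) leaves only the MSB of the lane.
    fn msb_as_bit0(lane: u32) -> u32 {
        lane >> (32 - 1)
    }

    fn main() {
        assert_eq!(msb_as_bit0(0x8000_0000), 1); // mask-set lane (MSB = 1)
        assert_eq!(msb_as_bit0(0x7fff_ffff), 0); // mask-clear lane (MSB = 0)
    }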
@@ -1291,7 +1292,7 @@ fn generic_simd_intrinsic(
         name: &str,
         in_elem: &::rustc::ty::TyS<'_>,
         in_ty: &::rustc::ty::TyS<'_>,
-        in_len: usize,
+        in_len: u64,
         bx: &mut Builder<'a, 'll, 'tcx>,
         span: Span,
         args: &[OperandRef<'tcx, &'ll Value>],
@@ -1400,7 +1401,7 @@ fn generic_simd_intrinsic(
     // FIXME: use:
     // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
     // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
-    fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: usize, no_pointers: usize) -> String {
+    fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
         let p0s: String = "p0".repeat(no_pointers);
         match elem_ty.kind {
             ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
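`llvm_vector_str` assembles the type suffix used when mangling LLVM intrinsic names, following the `v{len}{p0...}i{bits}` pattern visible in the hunk. A self-contained sketch of that mangling for integer element types (the helper name is made up):

    // e.g. a 4-lane i32 vector is "v4i32"; each pointer level adds a "p0".
    fn mangle_int_vector(vec_len: u64, no_pointers: usize, bit_width: u64) -> String {
        format!("v{}{}i{}", vec_len, "p0".repeat(no_pointers), bit_width)
    }

    fn main() {
        assert_eq!(mangle_int_vector(4, 0, 32), "v4i32");
        assert_eq!(mangle_int_vector(2, 1, 64), "v2p0i64");
    }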
@@ -1410,7 +1411,7 @@ fn generic_simd_intrinsic(
         }
     }
 
-    fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: usize,
+    fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64,
                       mut no_pointers: usize) -> &'ll Type {
         // FIXME: use cx.layout_of(ty).llvm_type() ?
         let mut elem_ty = match elem_ty.kind {
@@ -1423,7 +1424,7 @@ fn generic_simd_intrinsic(
             elem_ty = cx.type_ptr_to(elem_ty);
             no_pointers -= 1;
         }
-        cx.type_vector(elem_ty, vec_len as u64)
+        cx.type_vector(elem_ty, vec_len)
     }
 
 
@@ -1506,7 +1507,7 @@ fn generic_simd_intrinsic(
         // Truncate the mask vector to a vector of i1s:
         let (mask, mask_ty) = {
             let i1 = bx.type_i1();
-            let i1xn = bx.type_vector(i1, in_len as u64);
+            let i1xn = bx.type_vector(i1, in_len);
             (bx.trunc(args[2].immediate(), i1xn), i1xn)
         };
 
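Truncating the mask vector to `<i1 x N>` keeps only the lowest bit of each lane, which is the form the masked gather/scatter operations consume. A scalar model of that truncation (the helper is illustrative):

    // Truncation to i1 discards everything but the least significant bit.
    fn lane_mask_bit(lane: i32) -> bool {
        lane & 1 == 1
    }

    fn main() {
        assert!(lane_mask_bit(-1)); // an all-ones lane stays set
        assert!(!lane_mask_bit(0));
        assert!(lane_mask_bit(3));  // only bit 0 matters
    }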
@@ -1606,7 +1607,7 @@ fn generic_simd_intrinsic(
         // Truncate the mask vector to a vector of i1s:
         let (mask, mask_ty) = {
             let i1 = bx.type_i1();
-            let i1xn = bx.type_vector(i1, in_len as u64);
+            let i1xn = bx.type_vector(i1, in_len);
             (bx.trunc(args[2].immediate(), i1xn), i1xn)
         };
 