@@ -2977,27 +2977,27 @@ pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", imm5 = 1))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, imm5 = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 1))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, IMM5 = 1))]
 // Based on the discussion in https://github.com/rust-lang/stdarch/pull/792
 // `mov` seems to be an acceptable intrinsic to compile to
-// #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(vmov, imm5 = 1))]
-pub unsafe fn vgetq_lane_u64<const imm5: i32>(v: uint64x2_t) -> u64 {
-    static_assert_imm1!(imm5);
-    simd_extract(v, imm5 as u32)
+// #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(vmov, IMM5 = 1))]
+pub unsafe fn vgetq_lane_u64<const IMM5: i32>(v: uint64x2_t) -> u64 {
+    static_assert_imm1!(IMM5);
+    simd_extract(v, IMM5 as u32)
 }

 /// Move vector element to general-purpose register
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", imm5 = 0))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov, imm5 = 0))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 0))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov, IMM5 = 0))]
 // FIXME: no 32bit this seems to be turned into two vmov.32 instructions
 // validate correctness
-pub unsafe fn vget_lane_u64<const imm5: i32>(v: uint64x1_t) -> u64 {
-    static_assert!(imm5 : i32 where imm5 == 0);
+pub unsafe fn vget_lane_u64<const IMM5: i32>(v: uint64x1_t) -> u64 {
+    static_assert!(IMM5 : i32 where IMM5 == 0);
     simd_extract(v, 0)
 }
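
Editor's note: this hunk only renames the const generic from imm5 to IMM5 (upper case, as const parameters are conventionally written); behaviour is unchanged, and #[rustc_legacy_const_generics(1)] exists so the older vgetq_lane_u64(v, 1) spelling keeps working for callers. A caller sketch, not part of this diff, assuming the usual core::arch::aarch64 re-exports of vgetq_lane_u64 and the vld1q_u64 load; the wrapper name second_lane is illustrative.

    #[cfg(target_arch = "aarch64")]
    fn second_lane(words: &[u64; 2]) -> u64 {
        use core::arch::aarch64::{vgetq_lane_u64, vld1q_u64};
        // SAFETY: NEON is mandatory on AArch64, and `words` covers both lanes.
        unsafe {
            let v = vld1q_u64(words.as_ptr());
            // The lane index is a const generic; the imm5 -> IMM5 rename does
            // not change this call site.
            vgetq_lane_u64::<1>(v)
        }
    }
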
@@ -3006,47 +3006,47 @@ pub unsafe fn vget_lane_u64<const imm5: i32>(v: uint64x1_t) -> u64 {
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u16", imm5 = 2))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, imm5 = 2))]
-pub unsafe fn vgetq_lane_u16<const imm5: i32>(v: uint16x8_t) -> u16 {
-    static_assert_imm3!(imm5);
-    simd_extract(v, imm5 as u32)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u16", IMM5 = 2))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, IMM5 = 2))]
+pub unsafe fn vgetq_lane_u16<const IMM5: i32>(v: uint16x8_t) -> u16 {
+    static_assert_imm3!(IMM5);
+    simd_extract(v, IMM5 as u32)
 }

 /// Move vector element to general-purpose register
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", imm5 = 2))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, imm5 = 2))]
-pub unsafe fn vgetq_lane_u32<const imm5: i32>(v: uint32x4_t) -> u32 {
-    static_assert_imm2!(imm5);
-    simd_extract(v, imm5 as u32)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 2))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, IMM5 = 2))]
+pub unsafe fn vgetq_lane_u32<const IMM5: i32>(v: uint32x4_t) -> u32 {
+    static_assert_imm2!(IMM5);
+    simd_extract(v, IMM5 as u32)
 }

 /// Move vector element to general-purpose register
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", imm5 = 2))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, imm5 = 2))]
-pub unsafe fn vgetq_lane_s32<const imm5: i32>(v: int32x4_t) -> i32 {
-    static_assert_imm2!(imm5);
-    simd_extract(v, imm5 as u32)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 2))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, IMM5 = 2))]
+pub unsafe fn vgetq_lane_s32<const IMM5: i32>(v: int32x4_t) -> i32 {
+    static_assert_imm2!(IMM5);
+    simd_extract(v, IMM5 as u32)
 }

 /// Move vector element to general-purpose register
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[rustc_legacy_const_generics(1)]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u8", imm5 = 2))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, imm5 = 2))]
-pub unsafe fn vget_lane_u8<const imm5: i32>(v: uint8x8_t) -> u8 {
-    static_assert_imm3!(imm5);
-    simd_extract(v, imm5 as u32)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u8", IMM5 = 2))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, IMM5 = 2))]
+pub unsafe fn vget_lane_u8<const IMM5: i32>(v: uint8x8_t) -> u8 {
+    static_assert_imm3!(IMM5);
+    simd_extract(v, IMM5 as u32)
 }

 /// Duplicate vector element to vector or scalar
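
Editor's note: the four renames in this hunk follow the same pattern, and the width of the compile-time lane check tracks the lane count (static_assert_imm3 for 8 lanes, static_assert_imm2 for 4). A combined caller sketch, not part of this diff, assuming the usual core::arch::aarch64 re-exports of these lane extracts and of the vld1q_* loads; lane_demo and the input arrays are illustrative.

    #[cfg(target_arch = "aarch64")]
    fn lane_demo() -> (u16, u32, i32) {
        use core::arch::aarch64::{
            vgetq_lane_s32, vgetq_lane_u16, vgetq_lane_u32, vld1q_s32, vld1q_u16, vld1q_u32,
        };
        let u16s: [u16; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
        let u32s: [u32; 4] = [10, 20, 30, 40];
        let s32s: [i32; 4] = [-1, -2, -3, -4];
        // SAFETY: NEON is mandatory on AArch64; each load reads one full vector.
        unsafe {
            (
                vgetq_lane_u16::<2>(vld1q_u16(u16s.as_ptr())), // 8 lanes -> 3-bit index
                vgetq_lane_u32::<2>(vld1q_u32(u32s.as_ptr())), // 4 lanes -> 2-bit index
                vgetq_lane_s32::<2>(vld1q_s32(s32s.as_ptr())), // same shape, signed lanes
            )
        }
    }
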
@@ -3143,30 +3143,30 @@ pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", imm3 = 1))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("ushr", imm3 = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", IMM3 = 1))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr("ushr", IMM3 = 1))]
 #[rustc_legacy_const_generics(1)]
-pub unsafe fn vshrq_n_u8<const imm3: i32>(a: uint8x16_t) -> uint8x16_t {
-    if imm3 < 0 || imm3 > 7 {
+pub unsafe fn vshrq_n_u8<const IMM3: i32>(a: uint8x16_t) -> uint8x16_t {
+    if IMM3 < 0 || IMM3 > 7 {
         unreachable_unchecked();
     } else {
         uint8x16_t(
-            a.0 >> imm3,
-            a.1 >> imm3,
-            a.2 >> imm3,
-            a.3 >> imm3,
-            a.4 >> imm3,
-            a.5 >> imm3,
-            a.6 >> imm3,
-            a.7 >> imm3,
-            a.8 >> imm3,
-            a.9 >> imm3,
-            a.10 >> imm3,
-            a.11 >> imm3,
-            a.12 >> imm3,
-            a.13 >> imm3,
-            a.14 >> imm3,
-            a.15 >> imm3,
+            a.0 >> IMM3,
+            a.1 >> IMM3,
+            a.2 >> IMM3,
+            a.3 >> IMM3,
+            a.4 >> IMM3,
+            a.5 >> IMM3,
+            a.6 >> IMM3,
+            a.7 >> IMM3,
+            a.8 >> IMM3,
+            a.9 >> IMM3,
+            a.10 >> IMM3,
+            a.11 >> IMM3,
+            a.12 >> IMM3,
+            a.13 >> IMM3,
+            a.14 >> IMM3,
+            a.15 >> IMM3,
         )
     }
 }
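
Editor's note: in vshrq_n_u8 the if/else around unreachable_unchecked tells the optimizer the immediate lies in 0..=7, so the sixteen scalar shifts can be lowered to a single vector shift; in this version an out-of-range immediate would be undefined behaviour rather than a compile error. A scalar reference, not part of this diff and with an illustrative name, that can serve as a test oracle for the lane-wise behaviour:

    fn shrq_n_u8_reference(a: [u8; 16], n: u32) -> [u8; 16] {
        assert!(n <= 7, "the 3-bit immediate only encodes shifts of 0..=7");
        let mut out = [0u8; 16];
        for (dst, src) in out.iter_mut().zip(a.iter()) {
            *dst = src >> n; // each unsigned byte lane shifts independently
        }
        out
    }
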
@@ -3175,30 +3175,30 @@ pub unsafe fn vshrq_n_u8<const imm3: i32>(a: uint8x16_t) -> uint8x16_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshl.s8", imm3 = 1))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, imm3 = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshl.s8", IMM3 = 1))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, IMM3 = 1))]
 #[rustc_legacy_const_generics(1)]
-pub unsafe fn vshlq_n_u8<const imm3: i32>(a: uint8x16_t) -> uint8x16_t {
-    if imm3 < 0 || imm3 > 7 {
+pub unsafe fn vshlq_n_u8<const IMM3: i32>(a: uint8x16_t) -> uint8x16_t {
+    if IMM3 < 0 || IMM3 > 7 {
         unreachable_unchecked();
     } else {
         uint8x16_t(
-            a.0 << imm3,
-            a.1 << imm3,
-            a.2 << imm3,
-            a.3 << imm3,
-            a.4 << imm3,
-            a.5 << imm3,
-            a.6 << imm3,
-            a.7 << imm3,
-            a.8 << imm3,
-            a.9 << imm3,
-            a.10 << imm3,
-            a.11 << imm3,
-            a.12 << imm3,
-            a.13 << imm3,
-            a.14 << imm3,
-            a.15 << imm3,
+            a.0 << IMM3,
+            a.1 << IMM3,
+            a.2 << IMM3,
+            a.3 << IMM3,
+            a.4 << IMM3,
+            a.5 << IMM3,
+            a.6 << IMM3,
+            a.7 << IMM3,
+            a.8 << IMM3,
+            a.9 << IMM3,
+            a.10 << IMM3,
+            a.11 << IMM3,
+            a.12 << IMM3,
+            a.13 << IMM3,
+            a.14 << IMM3,
+            a.15 << IMM3,
         )
     }
 }
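
Editor's note: a usage sketch pairing the two shift intrinsics, not part of this diff, assuming the usual core::arch::aarch64 re-exports of vld1q_u8, vst1q_u8 and the shifts; clear_low_bits is an illustrative name.

    #[cfg(target_arch = "aarch64")]
    fn clear_low_bits(bytes: &[u8; 16]) -> [u8; 16] {
        use core::arch::aarch64::{vld1q_u8, vshlq_n_u8, vshrq_n_u8, vst1q_u8};
        let mut out = [0u8; 16];
        // SAFETY: NEON is mandatory on AArch64; both pointers cover 16 bytes.
        unsafe {
            let v = vld1q_u8(bytes.as_ptr());
            // Per lane: (x >> 1) << 1, i.e. clear bit 0 of every byte.
            let shifted = vshlq_n_u8::<1>(vshrq_n_u8::<1>(v));
            vst1q_u8(out.as_mut_ptr(), shifted);
        }
        out
    }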