@@ -3391,191 +3391,203 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) {
 // return vqsubq_u64(a, b);
 // }
 
-// NYI-LABEL: @test_vshl_s8(
-// NYI: [[VSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sshl.v8i8(<8 x i8> %a, <8 x i8> %b)
-// NYI: ret <8 x i8> [[VSHL_V_I]]
-// int8x8_t test_vshl_s8(int8x8_t a, int8x8_t b) {
-// return vshl_s8(a, b);
-// }
+int8x8_t test_vshl_s8(int8x8_t a, int8x8_t b) {
+  return vshl_s8(a, b);
 
-// NYI-LABEL: @test_vshl_s16(
-// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
-// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
-// NYI: [[VSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sshl.v4i16(<4 x i16> %a, <4 x i16> %b)
-// NYI: [[VSHL_V3_I:%.*]] = bitcast <4 x i16> [[VSHL_V2_I]] to <8 x i8>
-// NYI: ret <4 x i16> [[VSHL_V2_I]]
-// int16x4_t test_vshl_s16(int16x4_t a, int16x4_t b) {
-// return vshl_s16(a, b);
-// }
+  // CIR-LABEL: vshl_s8
+  // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !cir.vector<!s8i x 8>, {{%.*}} : !cir.vector<!s8i x 8>) -> !cir.vector<!s8i x 8>
 
-// NYI-LABEL: @test_vshl_s32(
-// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
-// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8>
-// NYI: [[VSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sshl.v2i32(<2 x i32> %a, <2 x i32> %b)
-// NYI: [[VSHL_V3_I:%.*]] = bitcast <2 x i32> [[VSHL_V2_I]] to <8 x i8>
-// NYI: ret <2 x i32> [[VSHL_V2_I]]
-// int32x2_t test_vshl_s32(int32x2_t a, int32x2_t b) {
-// return vshl_s32(a, b);
-// }
+  // LLVM: {{.*}}test_vshl_s8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]])
+  // LLVM: [[VSHL_V_I:%.*]] = shl <8 x i8> [[A]], [[B]]
+  // LLVM: ret <8 x i8> [[VSHL_V_I]]
+}
 
-// NYI-LABEL: @test_vshl_s64(
-// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
-// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8>
-// NYI: [[VSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sshl.v1i64(<1 x i64> %a, <1 x i64> %b)
-// NYI: [[VSHL_V3_I:%.*]] = bitcast <1 x i64> [[VSHL_V2_I]] to <8 x i8>
-// NYI: ret <1 x i64> [[VSHL_V2_I]]
-// int64x1_t test_vshl_s64(int64x1_t a, int64x1_t b) {
-// return vshl_s64(a, b);
-// }
+int16x4_t test_vshl_s16(int16x4_t a, int16x4_t b) {
+  return vshl_s16(a, b);
 
-// NYI-LABEL: @test_vshl_u8(
-// NYI: [[VSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.ushl.v8i8(<8 x i8> %a, <8 x i8> %b)
-// NYI: ret <8 x i8> [[VSHL_V_I]]
-// uint8x8_t test_vshl_u8(uint8x8_t a, int8x8_t b) {
-// return vshl_u8(a, b);
-// }
+  // CIR-LABEL: vshl_s16
+  // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !cir.vector<!s16i x 4>, {{%.*}} : !cir.vector<!s16i x 4>) -> !cir.vector<!s16i x 4>
 
-// NYI-LABEL: @test_vshl_u16(
-// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
-// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
-// NYI: [[VSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.ushl.v4i16(<4 x i16> %a, <4 x i16> %b)
-// NYI: [[VSHL_V3_I:%.*]] = bitcast <4 x i16> [[VSHL_V2_I]] to <8 x i8>
-// NYI: ret <4 x i16> [[VSHL_V2_I]]
-// uint16x4_t test_vshl_u16(uint16x4_t a, int16x4_t b) {
-// return vshl_u16(a, b);
-// }
+  // LLVM: {{.*}}test_vshl_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]])
+  // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
+  // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
+  // LLVM: [[VSHL_V2_I:%.*]] = shl <4 x i16> [[A]], [[B]]
+  // LLVM: ret <4 x i16> [[VSHL_V2_I]]
+}
 
-// NYI-LABEL: @test_vshl_u32(
-// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
-// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8>
-// NYI: [[VSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.ushl.v2i32(<2 x i32> %a, <2 x i32> %b)
-// NYI: [[VSHL_V3_I:%.*]] = bitcast <2 x i32> [[VSHL_V2_I]] to <8 x i8>
-// NYI: ret <2 x i32> [[VSHL_V2_I]]
-// uint32x2_t test_vshl_u32(uint32x2_t a, int32x2_t b) {
-// return vshl_u32(a, b);
-// }
+int32x2_t test_vshl_s32(int32x2_t a, int32x2_t b) {
+  return vshl_s32(a, b);
 
-// NYI-LABEL: @test_vshl_u64(
-// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
-// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8>
-// NYI: [[VSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.ushl.v1i64(<1 x i64> %a, <1 x i64> %b)
-// NYI: [[VSHL_V3_I:%.*]] = bitcast <1 x i64> [[VSHL_V2_I]] to <8 x i8>
-// NYI: ret <1 x i64> [[VSHL_V2_I]]
-// uint64x1_t test_vshl_u64(uint64x1_t a, int64x1_t b) {
-// return vshl_u64(a, b);
-// }
+  // CIR-LABEL: vshl_s32
+  // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !cir.vector<!s32i x 2>, {{%.*}} : !cir.vector<!s32i x 2>) -> !cir.vector<!s32i x 2>
+
+  // LLVM: {{.*}}test_vshl_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]])
+  // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
+  // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
+  // LLVM: [[VSHL_V2_I:%.*]] = shl <2 x i32> [[A]], [[B]]
+  // LLVM: ret <2 x i32> [[VSHL_V2_I]]
+}
+
+int64x1_t test_vshl_s64(int64x1_t a, int64x1_t b) {
+  return vshl_s64(a, b);
+
+  // CIR-LABEL: vshl_s64
+  // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !cir.vector<!s64i x 1>, {{%.*}} : !cir.vector<!s64i x 1>) -> !cir.vector<!s64i x 1>
+
+  // LLVM: {{.*}}test_vshl_s64(<1 x i64>{{.*}}[[A:%.*]], <1 x i64>{{.*}}[[B:%.*]])
+  // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
+  // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
+  // LLVM: [[VSHL_V2_I:%.*]] = shl <1 x i64> [[A]], [[B]]
+  // LLVM: ret <1 x i64> [[VSHL_V2_I]]
+}
+
+uint8x8_t test_vshl_u8(uint8x8_t a, int8x8_t b) {
+  return vshl_u8(a, b);
+
+  // CIR-LABEL: vshl_u8
+  // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !cir.vector<!u8i x 8>, {{%.*}} : !cir.vector<!u8i x 8>) -> !cir.vector<!u8i x 8>
+
+  // LLVM: {{.*}}test_vshl_u8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]])
+  // LLVM: [[VSHL_V_I:%.*]] = shl <8 x i8> [[A]], [[B]]
+  // LLVM: ret <8 x i8> [[VSHL_V_I]]
+}
+
+uint16x4_t test_vshl_u16(uint16x4_t a, int16x4_t b) {
+  return vshl_u16(a, b);
+
+  // CIR-LABEL: vshl_u16
+  // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !cir.vector<!u16i x 4>, {{%.*}} : !cir.vector<!u16i x 4>) -> !cir.vector<!u16i x 4>
+
+  // LLVM: {{.*}}test_vshl_u16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]])
+  // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
+  // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
+  // LLVM: [[VSHL_V2_I:%.*]] = shl <4 x i16> [[A]], [[B]]
+  // LLVM: ret <4 x i16> [[VSHL_V2_I]]
+}
+
+uint32x2_t test_vshl_u32(uint32x2_t a, int32x2_t b) {
+  return vshl_u32(a, b);
+
+  // CIR-LABEL: vshl_u32
+  // CIR: cir.shift(left, {{%.*}} : !cir.vector<!u32i x 2>, {{%.*}} : !cir.vector<!u32i x 2>) -> !cir.vector<!u32i x 2>
+
+  // LLVM: {{.*}}test_vshl_u32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]])
+  // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
+  // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
+  // LLVM: [[VSHL_V2_I:%.*]] = shl <2 x i32> [[A]], [[B]]
+  // LLVM: ret <2 x i32> [[VSHL_V2_I]]
+}
+
+uint64x1_t test_vshl_u64(uint64x1_t a, int64x1_t b) {
+  return vshl_u64(a, b);
+
+  // CIR-LABEL: vshl_u64
+  // CIR: cir.shift(left, {{%.*}} : !cir.vector<!u64i x 1>, {{%.*}} : !cir.vector<!u64i x 1>) -> !cir.vector<!u64i x 1>
+
+  // LLVM: {{.*}}test_vshl_u64(<1 x i64>{{.*}}[[A:%.*]], <1 x i64>{{.*}}[[B:%.*]])
+  // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
+  // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
+  // LLVM: [[VSHL_V2_I:%.*]] = shl <1 x i64> [[A]], [[B]]
+  // LLVM: ret <1 x i64> [[VSHL_V2_I]]
+}
 
 int8x16_t test_vshlq_s8(int8x16_t a, int8x16_t b) {
   return vshlq_s8(a, b);
 
   // CIR-LABEL: vshlq_s8
-  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} :
-  // CIR-SAME: (!cir.vector<!s8i x 16>, !cir.vector<!s8i x 16>) -> !cir.vector<!s8i x 16>
+  // CIR: cir.shift(left, {{%.*}} : !cir.vector<!s8i x 16>, {{%.*}} : !cir.vector<!s8i x 16>) -> !cir.vector<!s8i x 16>
 
   // LLVM: {{.*}}test_vshlq_s8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]])
-  // LLVM: [[VSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> [[A]], <16 x i8> [[B]])
+  // LLVM: [[VSHLQ_V_I:%.*]] = shl <16 x i8> [[A]], [[B]]
   // LLVM: ret <16 x i8> [[VSHLQ_V_I]]
 }
 
 int16x8_t test_vshlq_s16(int16x8_t a, int16x8_t b) {
   return vshlq_s16(a, b);
 
   // CIR-LABEL: vshlq_s16
-  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} :
-  // CIR-SAME: (!cir.vector<!s16i x 8>, !cir.vector<!s16i x 8>) -> !cir.vector<!s16i x 8>
+  // CIR: cir.shift(left, {{%.*}} : !cir.vector<!s16i x 8>, {{%.*}} : !cir.vector<!s16i x 8>) -> !cir.vector<!s16i x 8>
 
   // LLVM: {{.*}}test_vshlq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]])
   // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
   // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
-  // LLVM: [[VSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sshl.v8i16(<8 x i16> [[A]], <8 x i16> [[B]])
-  // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VSHLQ_V2_I]] to <16 x i8>
+  // LLVM: [[VSHLQ_V2_I:%.*]] = shl <8 x i16> [[A]], [[B]]
   // LLVM: ret <8 x i16> [[VSHLQ_V2_I]]
 }
 
 int32x4_t test_vshlq_s32(int32x4_t a, int32x4_t b) {
   return vshlq_s32(a, b);
 
   // CIR-LABEL: vshlq_s32
-  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} :
-  // CIR-SAME: (!cir.vector<!s32i x 4>, !cir.vector<!s32i x 4>) -> !cir.vector<!s32i x 4>
+  // CIR: cir.shift(left, {{%.*}} : !cir.vector<!s32i x 4>, {{%.*}} : !cir.vector<!s32i x 4>) -> !cir.vector<!s32i x 4>
 
   // LLVM: {{.*}}test_vshlq_s32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]])
   // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
   // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
-  // LLVM: [[VSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> [[A]], <4 x i32> [[B]])
-  // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VSHLQ_V2_I]] to <16 x i8>
+  // LLVM: [[VSHLQ_V2_I:%.*]] = shl <4 x i32> [[A]], [[B]]
   // LLVM: ret <4 x i32> [[VSHLQ_V2_I]]
 }
 
 int64x2_t test_vshlq_s64(int64x2_t a, int64x2_t b) {
   return vshlq_s64(a, b);
 
   // CIR-LABEL: vshlq_s64
-  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} :
-  // CIR-SAME: (!cir.vector<!s64i x 2>, !cir.vector<!s64i x 2>) -> !cir.vector<!s64i x 2>
+  // CIR: cir.shift(left, {{%.*}} : !cir.vector<!s64i x 2>, {{%.*}} : !cir.vector<!s64i x 2>) -> !cir.vector<!s64i x 2>
 
   // LLVM: {{.*}}test_vshlq_s64(<2 x i64>{{.*}}[[A:%.*]], <2 x i64>{{.*}}[[B:%.*]])
   // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
   // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8>
-  // LLVM: [[VSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64> [[A]], <2 x i64> [[B]])
-  // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VSHLQ_V2_I]] to <16 x i8>
+  // LLVM: [[VSHLQ_V2_I:%.*]] = shl <2 x i64> [[A]], [[B]]
   // LLVM: ret <2 x i64> [[VSHLQ_V2_I]]
 }
 
 uint8x16_t test_vshlq_u8(uint8x16_t a, int8x16_t b) {
   return vshlq_u8(a, b);
 
   // CIR-LABEL: vshlq_u8
-  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} :
-  // CIR-SAME: (!cir.vector<!u8i x 16>, !cir.vector<!u8i x 16>) -> !cir.vector<!u8i x 16>
+  // CIR: cir.shift(left, {{%.*}} : !cir.vector<!u8i x 16>, {{%.*}} : !cir.vector<!u8i x 16>) -> !cir.vector<!u8i x 16>
 
   // LLVM: {{.*}}test_vshlq_u8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]])
-  // LLVM: [[VSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.ushl.v16i8(<16 x i8> [[A]], <16 x i8> [[B]])
+  // LLVM: [[VSHLQ_V_I:%.*]] = shl <16 x i8> [[A]], [[B]]
   // LLVM: ret <16 x i8> [[VSHLQ_V_I]]
 }
 
 uint16x8_t test_vshlq_u16(uint16x8_t a, int16x8_t b) {
   return vshlq_u16(a, b);
 
   // CIR-LABEL: vshlq_u16
-  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} :
-  // CIR-SAME: (!cir.vector<!u16i x 8>, !cir.vector<!u16i x 8>) -> !cir.vector<!u16i x 8>
+  // CIR: cir.shift(left, {{%.*}} : !cir.vector<!u16i x 8>, {{%.*}} : !cir.vector<!u16i x 8>) -> !cir.vector<!u16i x 8>
 
   // LLVM: {{.*}}test_vshlq_u16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]])
   // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
   // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
-  // LLVM: [[VSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.ushl.v8i16(<8 x i16> [[A]], <8 x i16> [[B]])
-  // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VSHLQ_V2_I]] to <16 x i8>
+  // LLVM: [[VSHLQ_V2_I:%.*]] = shl <8 x i16> [[A]], [[B]]
   // LLVM: ret <8 x i16> [[VSHLQ_V2_I]]
 }
 
 uint32x4_t test_vshlq_u32(uint32x4_t a, int32x4_t b) {
   return vshlq_u32(a, b);
 
   // CIR-LABEL: vshlq_u32
-  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} :
-  // CIR-SAME: (!cir.vector<!u32i x 4>, !cir.vector<!u32i x 4>) -> !cir.vector<!u32i x 4>
+  // CIR: cir.shift(left, {{%.*}} : !cir.vector<!u32i x 4>, {{%.*}} : !cir.vector<!u32i x 4>) -> !cir.vector<!u32i x 4>
 
   // LLVM: {{.*}}test_vshlq_u32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]])
   // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
   // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
-  // LLVM: [[VSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32> [[A]], <4 x i32> [[B]])
-  // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VSHLQ_V2_I]] to <16 x i8>
+  // LLVM: [[VSHLQ_V2_I:%.*]] = shl <4 x i32> [[A]], [[B]]
   // LLVM: ret <4 x i32> [[VSHLQ_V2_I]]
 }
 
 uint64x2_t test_vshlq_u64(uint64x2_t a, int64x2_t b) {
   return vshlq_u64(a, b);
 
   // CIR-LABEL: vshlq_u64
-  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} :
-  // CIR-SAME: (!cir.vector<!u64i x 2>, !cir.vector<!u64i x 2>) -> !cir.vector<!u64i x 2>
+  // CIR: cir.shift(left, {{%.*}} : !cir.vector<!u64i x 2>, {{%.*}} : !cir.vector<!u64i x 2>) -> !cir.vector<!u64i x 2>
 
   // LLVM: {{.*}}test_vshlq_u64(<2 x i64>{{.*}}[[A:%.*]], <2 x i64>{{.*}}[[B:%.*]])
   // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
   // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8>
-  // LLVM: [[VSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.ushl.v2i64(<2 x i64> [[A]], <2 x i64> [[B]])
-  // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VSHLQ_V2_I]] to <16 x i8>
+  // LLVM: [[VSHLQ_V2_I:%.*]] = shl <2 x i64> [[A]], [[B]]
   // LLVM: ret <2 x i64> [[VSHLQ_V2_I]]
 }