@@ -7,41 +7,41 @@
 #include <sifive_vector.h>
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_sf_vqmaccsu_4x8x4_i32m1
-// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vqmaccsu.4x8x4.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vqmaccsu.4x8x4.nxv2i32.nxv8i8.nxv4i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_sf_vqmaccsu_4x8x4_i32m1(vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+vint32m1_t test_sf_vqmaccsu_4x8x4_i32m1(vint32m1_t vd, vint8m1_t vs1, vuint8mf2_t vs2, size_t vl) {
   return __riscv_sf_vqmaccsu_4x8x4_i32m1(vd, vs1, vs2, vl);
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_sf_vqmaccsu_4x8x4_i32m2
-// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vqmaccsu.4x8x4.nxv4i32.nxv8i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vqmaccsu.4x8x4.nxv4i32.nxv8i8.nxv8i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_sf_vqmaccsu_4x8x4_i32m2(vint32m2_t vd, vint8m1_t vs1, vuint8m2_t vs2, size_t vl) {
+vint32m2_t test_sf_vqmaccsu_4x8x4_i32m2(vint32m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
   return __riscv_sf_vqmaccsu_4x8x4_i32m2(vd, vs1, vs2, vl);
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_sf_vqmaccsu_4x8x4_i32m4
-// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vqmaccsu.4x8x4.nxv8i32.nxv8i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vqmaccsu.4x8x4.nxv8i32.nxv8i8.nxv16i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_sf_vqmaccsu_4x8x4_i32m4(vint32m4_t vd, vint8m1_t vs1, vuint8m4_t vs2, size_t vl) {
+vint32m4_t test_sf_vqmaccsu_4x8x4_i32m4(vint32m4_t vd, vint8m1_t vs1, vuint8m2_t vs2, size_t vl) {
   return __riscv_sf_vqmaccsu_4x8x4_i32m4(vd, vs1, vs2, vl);
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_sf_vqmaccsu_4x8x4_i32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vqmaccsu.4x8x4.nxv16i32.nxv8i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vqmaccsu.4x8x4.nxv16i32.nxv8i8.nxv32i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_sf_vqmaccsu_4x8x4_i32m8(vint32m8_t vd, vint8m1_t vs1, vuint8m8_t vs2, size_t vl) {
+vint32m8_t test_sf_vqmaccsu_4x8x4_i32m8(vint32m8_t vd, vint8m1_t vs1, vuint8m4_t vs2, size_t vl) {
   return __riscv_sf_vqmaccsu_4x8x4_i32m8(vd, vs1, vs2, vl);
 }
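Every hunk applies the same fix: for the 4x8x4 form of sf_vqmaccsu, vs1 stays fixed at vint8m1_t, and vs2 is narrowed to half the register-group size of the i32 accumulator instead of matching it, so the i32m1, i32m2, i32m4, and i32m8 variants now take u8mf2, u8m1, u8m2, and u8m4 operands respectively. The LLVM intrinsic manglings change to match (nxv4i8, nxv8i8, nxv16i8, nxv32i8). Below is a minimal sketch of a call with the corrected types, lifted from the updated test; the function name is illustrative, and it assumes a toolchain with the relevant SiFive extension enabled (the exact -march string, e.g. rv64gcv_xsfvqmaccqoq, is an assumption, not confirmed by this diff):

#include <sifive_vector.h>

// Sketch only: vs2 (b) is now vuint8m1_t, half the LMUL of the
// vint32m2_t accumulator, matching the corrected signature above.
vint32m2_t accumulate(vint32m2_t acc, vint8m1_t a, vuint8m1_t b, size_t vl) {
  return __riscv_sf_vqmaccsu_4x8x4_i32m2(acc, a, b, vl);
}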