Commit 236e3c3

[Clang][XTHeadVector] Implement 12.5 vsll/vsrl/vsra (llvm#71)
* [Clang][XTHeadVector] Define `vsll/vsrl/vsra`. Reference: ruyisdk#55
* [Clang][XTHeadVector] Test `vsll/vsrl/vsra`
* [Clang][XTHeadVector] Add `vsll/vsrl/vsra` wrappers
* [Clang][XTHeadVector] Test `vsll/vsrl/vsra` wrappers
* [NFC][XTHeadVector] Add TODO comments to avoid conflicts in the future
1 parent b64a49a commit 236e3c3

8 files changed: +2810 −0 lines

clang/include/clang/Basic/riscv_vector_xtheadv.td (+51 lines)

@@ -140,6 +140,30 @@ multiclass RVVCarryOutInBuiltinSet<string intrinsic_name>
                          ["vvm", "Uvm", "mUvUvm"],
                          ["vxm", "Uvm", "mUvUem"]]>;
 
+multiclass RVVSignedShiftBuiltinSet
+    : RVVOutOp1BuiltinSet<NAME, "csil",
+                          [["vv", "v", "vvUv"],
+                           ["vx", "v", "vvz"]]>;
+
+multiclass RVVSignedShiftBuiltinSetRoundingMode
+    : RVVOutOp1BuiltinSet<NAME, "csil",
+                          [["vv", "v", "vvUvu"],
+                           ["vx", "v", "vvzu"]]>;
+
+multiclass RVVUnsignedShiftBuiltinSet
+    : RVVOutOp1BuiltinSet<NAME, "csil",
+                          [["vv", "Uv", "UvUvUv"],
+                           ["vx", "Uv", "UvUvz"]]>;
+
+multiclass RVVUnsignedShiftBuiltinSetRoundingMode
+    : RVVOutOp1BuiltinSet<NAME, "csil",
+                          [["vv", "Uv", "UvUvUvu"],
+                           ["vx", "Uv", "UvUvzu"]]>;
+
+multiclass RVVShiftBuiltinSet
+    : RVVSignedShiftBuiltinSet,
+      RVVUnsignedShiftBuiltinSet;
+
 //===----------------------------------------------------------------------===//
 // 6. Configuration-Setting and Utility
 //===----------------------------------------------------------------------===//

@@ -969,4 +993,31 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {
 }
 defm th_vnot_v : RVVPseudoVNotBuiltin<"th_vxor", "csil">;
 
+// 12.5. Vector Single-Width Bit Shift Operations
+let UnMaskedPolicyScheme = HasPassthruOperand in {
+  defm th_vsll : RVVShiftBuiltinSet;
+  defm th_vsrl : RVVUnsignedShiftBuiltinSet;
+  defm th_vsra : RVVSignedShiftBuiltinSet;
+}
+
+// 12.6. Vector Narrowing Integer Right Shift Operations
+
+// 12.7. Vector Integer Comparison Operations
+
+// 12.8. Vector Integer Min/Max Operations
+
+// 12.9. Vector Single-Width Integer Multiply Operations
+
+// 12.10. Vector Integer Divide Operations
+
+// 12.11. Vector Widening Integer Multiply Operations
+
+// 12.12. Vector Single-Width Integer Multiply-Add Operations
+
+// 12.13. Vector Widening Integer Multiply-Add Operations
+
+// 12.14. Vector Integer Merge Operations
+
+// 12.15. Vector Integer Move Operations
+
 include "riscv_vector_xtheadv_wrappers.td"

clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td (+163 lines)

@@ -1495,3 +1495,166 @@ let HeaderCode =
 
 }] in
 def th_bitwise_logical_wrapper_macros: RVVHeader;
+
+let HeaderCode =
+[{
+// Vector Single Width Integer Bit Shift Operations
+#define __riscv_vsll_vv_i8m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_i8m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i8m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_i8m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i8m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_i8m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i8m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_i8m8(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i16m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_i16m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i16m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_i16m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i16m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_i16m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i16m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_i16m8(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i32m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_i32m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i32m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_i32m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i32m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_i32m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i32m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_i32m8(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i64m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_i64m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i64m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_i64m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i64m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_i64m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i64m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_i64m8(op1_v, shift_v, vl)
+
+#define __riscv_vsll_vv_u8m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_u8m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u8m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_u8m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u8m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_u8m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u8m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_u8m8(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u16m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_u16m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u16m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_u16m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u16m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_u16m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u16m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_u16m8(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u32m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_u32m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u32m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_u32m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u32m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_u32m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u32m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_u32m8(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u64m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_u64m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u64m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_u64m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u64m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_u64m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u64m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_u64m8(op1_v, shift_v, vl)
+
+#define __riscv_vsll_vx_i8m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_i8m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i8m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_i8m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i8m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_i8m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i8m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_i8m8(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i16m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_i16m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i16m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_i16m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i16m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_i16m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i16m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_i16m8(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i32m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_i32m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i32m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_i32m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i32m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_i32m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i32m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_i32m8(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i64m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_i64m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i64m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_i64m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i64m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_i64m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i64m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_i64m8(op1_v, shift_x, vl)
+
+#define __riscv_vsll_vx_u8m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_u8m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u8m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_u8m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u8m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_u8m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u8m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_u8m8(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u16m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_u16m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u16m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_u16m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u16m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_u16m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u16m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_u16m8(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u32m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_u32m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u32m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_u32m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u32m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_u32m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u32m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_u32m8(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u64m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_u64m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u64m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_u64m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u64m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_u64m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u64m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_u64m8(op1_v, shift_x, vl)
+
+#define __riscv_vsrl_vv_u8m1(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u8m1(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u8m2(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u8m2(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u8m4(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u8m4(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u8m8(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u8m8(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u16m1(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u16m1(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u16m2(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u16m2(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u16m4(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u16m4(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u16m8(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u16m8(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u32m1(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u32m1(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u32m2(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u32m2(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u32m4(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u32m4(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u32m8(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u32m8(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u64m1(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u64m1(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u64m2(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u64m2(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u64m4(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u64m4(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u64m8(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u64m8(op1_v, shift_v, vl)
+
+#define __riscv_vsrl_vx_u8m1(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u8m1(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u8m2(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u8m2(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u8m4(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u8m4(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u8m8(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u8m8(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u16m1(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u16m1(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u16m2(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u16m2(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u16m4(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u16m4(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u16m8(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u16m8(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u32m1(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u32m1(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u32m2(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u32m2(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u32m4(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u32m4(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u32m8(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u32m8(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u64m1(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u64m1(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u64m2(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u64m2(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u64m4(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u64m4(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u64m8(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u64m8(op1_v, shift_x, vl)
+
+#define __riscv_vsra_vv_i8m1(op1_v, shift_v, vl) __riscv_th_vsra_vv_i8m1(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i8m2(op1_v, shift_v, vl) __riscv_th_vsra_vv_i8m2(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i8m4(op1_v, shift_v, vl) __riscv_th_vsra_vv_i8m4(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i8m8(op1_v, shift_v, vl) __riscv_th_vsra_vv_i8m8(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i16m1(op1_v, shift_v, vl) __riscv_th_vsra_vv_i16m1(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i16m2(op1_v, shift_v, vl) __riscv_th_vsra_vv_i16m2(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i16m4(op1_v, shift_v, vl) __riscv_th_vsra_vv_i16m4(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i16m8(op1_v, shift_v, vl) __riscv_th_vsra_vv_i16m8(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i32m1(op1_v, shift_v, vl) __riscv_th_vsra_vv_i32m1(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i32m2(op1_v, shift_v, vl) __riscv_th_vsra_vv_i32m2(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i32m4(op1_v, shift_v, vl) __riscv_th_vsra_vv_i32m4(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i32m8(op1_v, shift_v, vl) __riscv_th_vsra_vv_i32m8(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i64m1(op1_v, shift_v, vl) __riscv_th_vsra_vv_i64m1(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i64m2(op1_v, shift_v, vl) __riscv_th_vsra_vv_i64m2(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i64m4(op1_v, shift_v, vl) __riscv_th_vsra_vv_i64m4(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i64m8(op1_v, shift_v, vl) __riscv_th_vsra_vv_i64m8(op1_v, shift_v, vl)
+
+#define __riscv_vsra_vx_i8m1(op1_v, shift_x, vl) __riscv_th_vsra_vx_i8m1(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i8m2(op1_v, shift_x, vl) __riscv_th_vsra_vx_i8m2(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i8m4(op1_v, shift_x, vl) __riscv_th_vsra_vx_i8m4(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i8m8(op1_v, shift_x, vl) __riscv_th_vsra_vx_i8m8(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i16m1(op1_v, shift_x, vl) __riscv_th_vsra_vx_i16m1(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i16m2(op1_v, shift_x, vl) __riscv_th_vsra_vx_i16m2(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i16m4(op1_v, shift_x, vl) __riscv_th_vsra_vx_i16m4(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i16m8(op1_v, shift_x, vl) __riscv_th_vsra_vx_i16m8(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i32m1(op1_v, shift_x, vl) __riscv_th_vsra_vx_i32m1(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i32m2(op1_v, shift_x, vl) __riscv_th_vsra_vx_i32m2(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i32m4(op1_v, shift_x, vl) __riscv_th_vsra_vx_i32m4(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i32m8(op1_v, shift_x, vl) __riscv_th_vsra_vx_i32m8(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i64m1(op1_v, shift_x, vl) __riscv_th_vsra_vx_i64m1(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i64m2(op1_v, shift_x, vl) __riscv_th_vsra_vx_i64m2(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i64m4(op1_v, shift_x, vl) __riscv_th_vsra_vx_i64m4(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i64m8(op1_v, shift_x, vl) __riscv_th_vsra_vx_i64m8(op1_v, shift_x, vl)
+
+}] in
+def th_single_width_integer_bit_shift_wrapper_macros: RVVHeader;
+
+// 12.6. Vector Narrowing Integer Right Shift Operations
+
+// 12.7. Vector Integer Comparison Operations
+
+// 12.8. Vector Integer Min/Max Operations
+
+// 12.9. Vector Single-Width Integer Multiply Operations
+
+// 12.10. Vector Integer Divide Operations
+
+// 12.11. Vector Widening Integer Multiply Operations
+
+// 12.12. Vector Single-Width Integer Multiply-Add Operations
+
+// 12.13. Vector Widening Integer Multiply-Add Operations
+
+// 12.14. Vector Integer Merge Operations
+
+// 12.15. Vector Integer Move Operations
+
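The macro layer means code written against the standard RVV intrinsic names compiles unchanged on XTHeadVector, with each call expanding to the `__riscv_th_`-prefixed builtin. A minimal usage sketch (assuming `riscv_vector.h` exposes these wrappers; the function name is hypothetical):

#include <riscv_vector.h>

/* Shift each element of src left by the per-element amounts in amt,
 * then halve with an arithmetic right shift. The wrappers rewrite the
 * two calls to __riscv_th_vsll_vv_i32m1 / __riscv_th_vsra_vx_i32m1. */
vint32m1_t scale_then_halve(vint32m1_t src, vuint32m1_t amt, size_t vl) {
  vint32m1_t shifted = __riscv_vsll_vv_i32m1(src, amt, vl);
  return __riscv_vsra_vx_i32m1(shifted, 1, vl);  /* signed halving */
}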
