@@ -1495,3 +1495,166 @@ let HeaderCode =
 
 }] in
 def th_bitwise_logical_wrapper_macros: RVVHeader;
+
+let HeaderCode =
+[{
+// 12.5. Vector Single-Width Integer Bit Shift Operations
+#define __riscv_vsll_vv_i8m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_i8m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i8m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_i8m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i8m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_i8m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i8m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_i8m8(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i16m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_i16m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i16m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_i16m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i16m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_i16m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i16m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_i16m8(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i32m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_i32m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i32m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_i32m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i32m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_i32m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i32m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_i32m8(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i64m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_i64m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i64m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_i64m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i64m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_i64m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_i64m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_i64m8(op1_v, shift_v, vl)
+
+#define __riscv_vsll_vv_u8m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_u8m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u8m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_u8m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u8m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_u8m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u8m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_u8m8(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u16m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_u16m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u16m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_u16m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u16m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_u16m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u16m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_u16m8(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u32m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_u32m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u32m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_u32m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u32m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_u32m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u32m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_u32m8(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u64m1(op1_v, shift_v, vl) __riscv_th_vsll_vv_u64m1(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u64m2(op1_v, shift_v, vl) __riscv_th_vsll_vv_u64m2(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u64m4(op1_v, shift_v, vl) __riscv_th_vsll_vv_u64m4(op1_v, shift_v, vl)
+#define __riscv_vsll_vv_u64m8(op1_v, shift_v, vl) __riscv_th_vsll_vv_u64m8(op1_v, shift_v, vl)
+
+#define __riscv_vsll_vx_i8m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_i8m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i8m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_i8m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i8m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_i8m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i8m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_i8m8(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i16m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_i16m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i16m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_i16m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i16m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_i16m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i16m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_i16m8(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i32m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_i32m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i32m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_i32m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i32m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_i32m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i32m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_i32m8(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i64m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_i64m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i64m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_i64m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i64m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_i64m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_i64m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_i64m8(op1_v, shift_x, vl)
+
+#define __riscv_vsll_vx_u8m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_u8m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u8m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_u8m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u8m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_u8m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u8m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_u8m8(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u16m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_u16m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u16m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_u16m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u16m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_u16m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u16m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_u16m8(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u32m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_u32m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u32m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_u32m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u32m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_u32m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u32m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_u32m8(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u64m1(op1_v, shift_x, vl) __riscv_th_vsll_vx_u64m1(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u64m2(op1_v, shift_x, vl) __riscv_th_vsll_vx_u64m2(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u64m4(op1_v, shift_x, vl) __riscv_th_vsll_vx_u64m4(op1_v, shift_x, vl)
+#define __riscv_vsll_vx_u64m8(op1_v, shift_x, vl) __riscv_th_vsll_vx_u64m8(op1_v, shift_x, vl)
+
+#define __riscv_vsrl_vv_u8m1(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u8m1(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u8m2(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u8m2(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u8m4(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u8m4(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u8m8(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u8m8(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u16m1(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u16m1(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u16m2(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u16m2(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u16m4(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u16m4(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u16m8(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u16m8(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u32m1(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u32m1(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u32m2(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u32m2(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u32m4(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u32m4(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u32m8(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u32m8(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u64m1(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u64m1(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u64m2(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u64m2(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u64m4(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u64m4(op1_v, shift_v, vl)
+#define __riscv_vsrl_vv_u64m8(op1_v, shift_v, vl) __riscv_th_vsrl_vv_u64m8(op1_v, shift_v, vl)
+
+#define __riscv_vsrl_vx_u8m1(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u8m1(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u8m2(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u8m2(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u8m4(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u8m4(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u8m8(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u8m8(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u16m1(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u16m1(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u16m2(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u16m2(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u16m4(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u16m4(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u16m8(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u16m8(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u32m1(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u32m1(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u32m2(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u32m2(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u32m4(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u32m4(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u32m8(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u32m8(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u64m1(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u64m1(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u64m2(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u64m2(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u64m4(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u64m4(op1_v, shift_x, vl)
+#define __riscv_vsrl_vx_u64m8(op1_v, shift_x, vl) __riscv_th_vsrl_vx_u64m8(op1_v, shift_x, vl)
+
+#define __riscv_vsra_vv_i8m1(op1_v, shift_v, vl) __riscv_th_vsra_vv_i8m1(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i8m2(op1_v, shift_v, vl) __riscv_th_vsra_vv_i8m2(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i8m4(op1_v, shift_v, vl) __riscv_th_vsra_vv_i8m4(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i8m8(op1_v, shift_v, vl) __riscv_th_vsra_vv_i8m8(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i16m1(op1_v, shift_v, vl) __riscv_th_vsra_vv_i16m1(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i16m2(op1_v, shift_v, vl) __riscv_th_vsra_vv_i16m2(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i16m4(op1_v, shift_v, vl) __riscv_th_vsra_vv_i16m4(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i16m8(op1_v, shift_v, vl) __riscv_th_vsra_vv_i16m8(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i32m1(op1_v, shift_v, vl) __riscv_th_vsra_vv_i32m1(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i32m2(op1_v, shift_v, vl) __riscv_th_vsra_vv_i32m2(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i32m4(op1_v, shift_v, vl) __riscv_th_vsra_vv_i32m4(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i32m8(op1_v, shift_v, vl) __riscv_th_vsra_vv_i32m8(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i64m1(op1_v, shift_v, vl) __riscv_th_vsra_vv_i64m1(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i64m2(op1_v, shift_v, vl) __riscv_th_vsra_vv_i64m2(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i64m4(op1_v, shift_v, vl) __riscv_th_vsra_vv_i64m4(op1_v, shift_v, vl)
+#define __riscv_vsra_vv_i64m8(op1_v, shift_v, vl) __riscv_th_vsra_vv_i64m8(op1_v, shift_v, vl)
+
+#define __riscv_vsra_vx_i8m1(op1_v, shift_x, vl) __riscv_th_vsra_vx_i8m1(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i8m2(op1_v, shift_x, vl) __riscv_th_vsra_vx_i8m2(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i8m4(op1_v, shift_x, vl) __riscv_th_vsra_vx_i8m4(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i8m8(op1_v, shift_x, vl) __riscv_th_vsra_vx_i8m8(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i16m1(op1_v, shift_x, vl) __riscv_th_vsra_vx_i16m1(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i16m2(op1_v, shift_x, vl) __riscv_th_vsra_vx_i16m2(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i16m4(op1_v, shift_x, vl) __riscv_th_vsra_vx_i16m4(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i16m8(op1_v, shift_x, vl) __riscv_th_vsra_vx_i16m8(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i32m1(op1_v, shift_x, vl) __riscv_th_vsra_vx_i32m1(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i32m2(op1_v, shift_x, vl) __riscv_th_vsra_vx_i32m2(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i32m4(op1_v, shift_x, vl) __riscv_th_vsra_vx_i32m4(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i32m8(op1_v, shift_x, vl) __riscv_th_vsra_vx_i32m8(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i64m1(op1_v, shift_x, vl) __riscv_th_vsra_vx_i64m1(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i64m2(op1_v, shift_x, vl) __riscv_th_vsra_vx_i64m2(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i64m4(op1_v, shift_x, vl) __riscv_th_vsra_vx_i64m4(op1_v, shift_x, vl)
+#define __riscv_vsra_vx_i64m8(op1_v, shift_x, vl) __riscv_th_vsra_vx_i64m8(op1_v, shift_x, vl)
+
+}] in
+def th_single_width_integer_bit_shift_wrapper_macros: RVVHeader;
+
+// 12.6. Vector Narrowing Integer Right Shift Operations
+
+// 12.7. Vector Integer Comparison Operations
+
+// 12.8. Vector Integer Min/Max Operations
+
+// 12.9. Vector Single-Width Integer Multiply Operations
+
+// 12.10. Vector Integer Divide Operations
+
+// 12.11. Vector Widening Integer Multiply Operations
+
+// 12.12. Vector Single-Width Integer Multiply-Add Operations
+
+// 12.13. Vector Widening Integer Multiply-Add Operations
+
+// 12.14. Vector Integer Merge Operations
+
+// 12.15. Vector Integer Move Operations
+
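For context, a minimal usage sketch of what these wrappers enable (not part of the diff): source written against the standard RVV intrinsic names compiles unchanged, with each call expanding at preprocessing time to the corresponding __riscv_th_ intrinsic. The header name riscv_th_vector.h and the helper function below are assumptions for illustration, not confirmed by this patch.

/* Hypothetical sketch, assuming the wrapper macros land in a generated
 * header named riscv_th_vector.h; the function name is illustrative only. */
#include <riscv_th_vector.h>
#include <stddef.h>

/* Per-element left shift: the i32m1 wrapper macro above rewrites this call
 * to __riscv_th_vsll_vv_i32m1(v, s, vl) before compilation. */
static inline vint32m1_t shift_left_elems(vint32m1_t v, vuint32m1_t s,
                                          size_t vl) {
  return __riscv_vsll_vv_i32m1(v, s, vl);
}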