@@ -447,6 +447,304 @@ Numeric Instructions
\end{array}

+ .. index:: simd instruction
+    pair: text format; instruction
+ .. _text-instr-simd:
+
+ SIMD Instructions
+ ~~~~~~~~~~~~~~~~~~~~
+
+ SIMD memory instructions have optional offset and alignment immediates, like the :ref:`memory instructions <text-memarg>`.
+
+ .. math::
+    \begin{array}{llclll}
+    \production{instruction} & \Tplaininstr_I &::=& \dots \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\ &&|&
+      \text{v128.load}~~m{:}\Tmemarg_{16} &\Rightarrow& \V128.\LOAD~m \\ &&|&
+      \text{i16x8.load8x8\_s}~~m{:}\Tmemarg_8 &\Rightarrow& \I16X8.\LOAD\K{8x8\_s}~m \\ &&|&
+      \text{i16x8.load8x8\_u}~~m{:}\Tmemarg_8 &\Rightarrow& \I16X8.\LOAD\K{8x8\_u}~m \\ &&|&
+      \text{i32x4.load16x4\_s}~~m{:}\Tmemarg_8 &\Rightarrow& \I32X4.\LOAD\K{16x4\_s}~m \\ &&|&
+      \text{i32x4.load16x4\_u}~~m{:}\Tmemarg_8 &\Rightarrow& \I32X4.\LOAD\K{16x4\_u}~m \\ &&|&
+      \text{i64x2.load32x2\_s}~~m{:}\Tmemarg_8 &\Rightarrow& \I64X2.\LOAD\K{32x2\_s}~m \\ &&|&
+      \text{i64x2.load32x2\_u}~~m{:}\Tmemarg_8 &\Rightarrow& \I64X2.\LOAD\K{32x2\_u}~m \\ &&|&
+      \text{i8x16.load\_splat}~~m{:}\Tmemarg_1 &\Rightarrow& \I8X16.\LOAD\K{\_splat}~m \\ &&|&
+      \text{i16x8.load\_splat}~~m{:}\Tmemarg_2 &\Rightarrow& \I16X8.\LOAD\K{\_splat}~m \\ &&|&
+      \text{i32x4.load\_splat}~~m{:}\Tmemarg_4 &\Rightarrow& \I32X4.\LOAD\K{\_splat}~m \\ &&|&
+      \text{i64x2.load\_splat}~~m{:}\Tmemarg_8 &\Rightarrow& \I64X2.\LOAD\K{\_splat}~m \\ &&|&
+      \text{v128.store}~~m{:}\Tmemarg_{16} &\Rightarrow& \V128.\STORE~m \\
+    \end{array}
+
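+ The grammar above can be exercised with a short module; the following sketch is purely illustrative (the memory, addresses, and alignments are assumptions, not part of the proposal)::
+
+    (module
+      (memory 1)
+      (func
+        ;; copy 16 bytes from address 0 to address 16
+        (v128.store offset=16 (i32.const 0)
+          (v128.load offset=0 align=16 (i32.const 0)))
+        ;; sign-extending load of 8 packed bytes into 8 lanes of 16 bits
+        (drop (i16x8.load8x8_s align=8 (i32.const 32)))))
+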
+ SIMD const instructions have a mandatory :ref:`shape <syntax-simd-shape>` descriptor, which determines how the following values are parsed.
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{v128.const}~~\text{i8x16}~~(n{:}\Ti8)^{16} &\Rightarrow& \V128.\VCONST~\bytes_{i128}^{-1}(\bytes_{i8}(n^{16})) \\ &&|&
+      \text{v128.const}~~\text{i16x8}~~(n{:}\Ti16)^{8} &\Rightarrow& \V128.\VCONST~\bytes_{i128}^{-1}(\bytes_{i16}(n^8)) \\ &&|&
+      \text{v128.const}~~\text{i32x4}~~(n{:}\Ti32)^{4} &\Rightarrow& \V128.\VCONST~\bytes_{i128}^{-1}(\bytes_{i32}(n^4)) \\ &&|&
+      \text{v128.const}~~\text{i64x2}~~(n{:}\Ti64)^{2} &\Rightarrow& \V128.\VCONST~\bytes_{i128}^{-1}(\bytes_{i64}(n^2)) \\ &&|&
+      \text{v128.const}~~\text{f32x4}~~(z{:}\Tf32)^{4} &\Rightarrow& \V128.\VCONST~\bytes_{i128}^{-1}(\bytes_{f32}(z^4)) \\ &&|&
+      \text{v128.const}~~\text{f64x2}~~(z{:}\Tf64)^{2} &\Rightarrow& \V128.\VCONST~\bytes_{i128}^{-1}(\bytes_{f64}(z^2)) \\ &&|&
+      \text{i8x16.shuffle}~~(laneidx{:}\Tu8)^{16} &\Rightarrow& \I8X16.\SHUFFLE~laneidx^{16} \\ &&|&
+      \text{i8x16.swizzle} &\Rightarrow& \I8X16.\SWIZZLE \\
+    \end{array}
+
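+ As an illustrative sketch (not taken from the proposal text), ``v128.const`` is written as the shape followed by the corresponding number of scalar values, and ``i8x16.shuffle`` takes its 16 lane indices as immediates before its two vector operands::
+
+    (module
+      (func (result v128)
+        ;; interleave the low 8 bytes of two constant vectors
+        (i8x16.shuffle 0 16 1 17 2 18 3 19 4 20 5 21 6 22 7 23
+          (v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
+          (v128.const i32x4 0x11111111 0x22222222 0x33333333 0x44444444))))
+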
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{i8x16.splat} &\Rightarrow& \I8X16.\SPLAT \\ &&|&
+      \text{i16x8.splat} &\Rightarrow& \I16X8.\SPLAT \\ &&|&
+      \text{i32x4.splat} &\Rightarrow& \I32X4.\SPLAT \\ &&|&
+      \text{i64x2.splat} &\Rightarrow& \I64X2.\SPLAT \\ &&|&
+      \text{f32x4.splat} &\Rightarrow& \F32X4.\SPLAT \\ &&|&
+      \text{f64x2.splat} &\Rightarrow& \F64X2.\SPLAT \\
+    \end{array}
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{i8x16.extract\_lane\_s}~~laneidx{:}\Tu8 &\Rightarrow& \I8X16.\EXTRACTLANE\K{\_s}~laneidx \\ &&|&
+      \text{i8x16.extract\_lane\_u}~~laneidx{:}\Tu8 &\Rightarrow& \I8X16.\EXTRACTLANE\K{\_u}~laneidx \\ &&|&
+      \text{i8x16.replace\_lane}~~laneidx{:}\Tu8 &\Rightarrow& \I8X16.\REPLACELANE~laneidx \\ &&|&
+      \text{i16x8.extract\_lane\_s}~~laneidx{:}\Tu8 &\Rightarrow& \I16X8.\EXTRACTLANE\K{\_s}~laneidx \\ &&|&
+      \text{i16x8.extract\_lane\_u}~~laneidx{:}\Tu8 &\Rightarrow& \I16X8.\EXTRACTLANE\K{\_u}~laneidx \\ &&|&
+      \text{i16x8.replace\_lane}~~laneidx{:}\Tu8 &\Rightarrow& \I16X8.\REPLACELANE~laneidx \\ &&|&
+      \text{i32x4.extract\_lane}~~laneidx{:}\Tu8 &\Rightarrow& \I32X4.\EXTRACTLANE~laneidx \\ &&|&
+      \text{i32x4.replace\_lane}~~laneidx{:}\Tu8 &\Rightarrow& \I32X4.\REPLACELANE~laneidx \\ &&|&
+      \text{i64x2.extract\_lane}~~laneidx{:}\Tu8 &\Rightarrow& \I64X2.\EXTRACTLANE~laneidx \\ &&|&
+      \text{i64x2.replace\_lane}~~laneidx{:}\Tu8 &\Rightarrow& \I64X2.\REPLACELANE~laneidx \\ &&|&
+      \text{f32x4.extract\_lane}~~laneidx{:}\Tu8 &\Rightarrow& \F32X4.\EXTRACTLANE~laneidx \\ &&|&
+      \text{f32x4.replace\_lane}~~laneidx{:}\Tu8 &\Rightarrow& \F32X4.\REPLACELANE~laneidx \\ &&|&
+      \text{f64x2.extract\_lane}~~laneidx{:}\Tu8 &\Rightarrow& \F64X2.\EXTRACTLANE~laneidx \\ &&|&
+      \text{f64x2.replace\_lane}~~laneidx{:}\Tu8 &\Rightarrow& \F64X2.\REPLACELANE~laneidx \\
+    \end{array}
+
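+ A minimal sketch of splatting and lane access (illustrative only, not from the proposal): the lane index is a ``u8`` immediate, and extraction of a packed lane chooses signed or unsigned extension::
+
+    (module
+      (func (param $x i32) (result i32)
+        ;; broadcast $x to all 8 lanes, then read lane 3 back zero-extended
+        (i16x8.extract_lane_u 3
+          (i16x8.splat (local.get $x)))))
+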
+ .. _text-virelop:
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{i8x16.eq} &\Rightarrow& \I8X16.\VEQ \\ &&|&
+      \text{i8x16.ne} &\Rightarrow& \I8X16.\VNE \\ &&|&
+      \text{i8x16.lt\_s} &\Rightarrow& \I8X16.\VLT\K{\_s} \\ &&|&
+      \text{i8x16.lt\_u} &\Rightarrow& \I8X16.\VLT\K{\_u} \\ &&|&
+      \text{i8x16.gt\_s} &\Rightarrow& \I8X16.\VGT\K{\_s} \\ &&|&
+      \text{i8x16.gt\_u} &\Rightarrow& \I8X16.\VGT\K{\_u} \\ &&|&
+      \text{i8x16.le\_s} &\Rightarrow& \I8X16.\VLE\K{\_s} \\ &&|&
+      \text{i8x16.le\_u} &\Rightarrow& \I8X16.\VLE\K{\_u} \\ &&|&
+      \text{i8x16.ge\_s} &\Rightarrow& \I8X16.\VGE\K{\_s} \\ &&|&
+      \text{i8x16.ge\_u} &\Rightarrow& \I8X16.\VGE\K{\_u} \\
+    \end{array}
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{i16x8.eq} &\Rightarrow& \I16X8.\VEQ \\ &&|&
+      \text{i16x8.ne} &\Rightarrow& \I16X8.\VNE \\ &&|&
+      \text{i16x8.lt\_s} &\Rightarrow& \I16X8.\VLT\K{\_s} \\ &&|&
+      \text{i16x8.lt\_u} &\Rightarrow& \I16X8.\VLT\K{\_u} \\ &&|&
+      \text{i16x8.gt\_s} &\Rightarrow& \I16X8.\VGT\K{\_s} \\ &&|&
+      \text{i16x8.gt\_u} &\Rightarrow& \I16X8.\VGT\K{\_u} \\ &&|&
+      \text{i16x8.le\_s} &\Rightarrow& \I16X8.\VLE\K{\_s} \\ &&|&
+      \text{i16x8.le\_u} &\Rightarrow& \I16X8.\VLE\K{\_u} \\ &&|&
+      \text{i16x8.ge\_s} &\Rightarrow& \I16X8.\VGE\K{\_s} \\ &&|&
+      \text{i16x8.ge\_u} &\Rightarrow& \I16X8.\VGE\K{\_u} \\
+    \end{array}
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{i32x4.eq} &\Rightarrow& \I32X4.\VEQ \\ &&|&
+      \text{i32x4.ne} &\Rightarrow& \I32X4.\VNE \\ &&|&
+      \text{i32x4.lt\_s} &\Rightarrow& \I32X4.\VLT\K{\_s} \\ &&|&
+      \text{i32x4.lt\_u} &\Rightarrow& \I32X4.\VLT\K{\_u} \\ &&|&
+      \text{i32x4.gt\_s} &\Rightarrow& \I32X4.\VGT\K{\_s} \\ &&|&
+      \text{i32x4.gt\_u} &\Rightarrow& \I32X4.\VGT\K{\_u} \\ &&|&
+      \text{i32x4.le\_s} &\Rightarrow& \I32X4.\VLE\K{\_s} \\ &&|&
+      \text{i32x4.le\_u} &\Rightarrow& \I32X4.\VLE\K{\_u} \\ &&|&
+      \text{i32x4.ge\_s} &\Rightarrow& \I32X4.\VGE\K{\_s} \\ &&|&
+      \text{i32x4.ge\_u} &\Rightarrow& \I32X4.\VGE\K{\_u} \\
+    \end{array}
+
+ .. _text-vfrelop:
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{f32x4.eq} &\Rightarrow& \F32X4.\VEQ \\ &&|&
+      \text{f32x4.ne} &\Rightarrow& \F32X4.\VNE \\ &&|&
+      \text{f32x4.lt} &\Rightarrow& \F32X4.\VLT \\ &&|&
+      \text{f32x4.gt} &\Rightarrow& \F32X4.\VGT \\ &&|&
+      \text{f32x4.le} &\Rightarrow& \F32X4.\VLE \\ &&|&
+      \text{f32x4.ge} &\Rightarrow& \F32X4.\VGE \\
+    \end{array}
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{f64x2.eq} &\Rightarrow& \F64X2.\VEQ \\ &&|&
+      \text{f64x2.ne} &\Rightarrow& \F64X2.\VNE \\ &&|&
+      \text{f64x2.lt} &\Rightarrow& \F64X2.\VLT \\ &&|&
+      \text{f64x2.gt} &\Rightarrow& \F64X2.\VGT \\ &&|&
+      \text{f64x2.le} &\Rightarrow& \F64X2.\VLE \\ &&|&
+      \text{f64x2.ge} &\Rightarrow& \F64X2.\VGE \\
+    \end{array}
+
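+ Comparisons have no immediates; an illustrative sketch (not from the proposal), assuming the standard all-ones/all-zeros lane-wise result::
+
+    (module
+      (func (param $a v128) (param $b v128) (result v128)
+        ;; lanes where $a < $b (signed 16-bit) become all ones, others all zeros
+        (i16x8.lt_s (local.get $a) (local.get $b))))
+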
+ .. _text-vsunop:
+ .. _text-vsbinop:
+ .. _text-vsternop:
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{v128.not} &\Rightarrow& \V128.\VNOT \\ &&|&
+      \text{v128.and} &\Rightarrow& \V128.\VAND \\ &&|&
+      \text{v128.andnot} &\Rightarrow& \V128.\VANDNOT \\ &&|&
+      \text{v128.or} &\Rightarrow& \V128.\VOR \\ &&|&
+      \text{v128.xor} &\Rightarrow& \V128.\VXOR \\ &&|&
+      \text{v128.bitselect} &\Rightarrow& \V128.\BITSELECT
+    \end{array}
+
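+ The bitwise instructions likewise take no immediates; an illustrative sketch (not from the proposal) of ``v128.bitselect``, which selects bits from its first or second operand according to the mask given as its third operand::
+
+    (module
+      (func (param $a v128) (param $b v128) (param $mask v128) (result v128)
+        (v128.bitselect (local.get $a) (local.get $b) (local.get $mask))))
+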
+ .. _text-vtestop:
+ .. _text-vshiftop:
+ .. _text-viunop:
+ .. _text-vibinop:
+ .. _text-viminmaxop:
+ .. _text-vsatbinop:
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{i8x16.abs} &\Rightarrow& \I8X16.\VABS \\ &&|&
+      \text{i8x16.neg} &\Rightarrow& \I8X16.\VNEG \\ &&|&
+      \text{i8x16.any\_true} &\Rightarrow& \I8X16.\ANYTRUE \\ &&|&
+      \text{i8x16.all\_true} &\Rightarrow& \I8X16.\ALLTRUE \\ &&|&
+      \text{i8x16.bitmask} &\Rightarrow& \I8X16.\BITMASK \\ &&|&
+      \text{i8x16.narrow\_i16x8\_s} &\Rightarrow& \I8X16.\NARROW\K{\_i16x8\_s} \\ &&|&
+      \text{i8x16.narrow\_i16x8\_u} &\Rightarrow& \I8X16.\NARROW\K{\_i16x8\_u} \\ &&|&
+      \text{i8x16.shl} &\Rightarrow& \I8X16.\VSHL \\ &&|&
+      \text{i8x16.shr\_s} &\Rightarrow& \I8X16.\VSHR\K{\_s} \\ &&|&
+      \text{i8x16.shr\_u} &\Rightarrow& \I8X16.\VSHR\K{\_u} \\ &&|&
+      \text{i8x16.add} &\Rightarrow& \I8X16.\VADD \\ &&|&
+      \text{i8x16.add\_sat\_s} &\Rightarrow& \I8X16.\VADD\K{\_sat\_s} \\ &&|&
+      \text{i8x16.add\_sat\_u} &\Rightarrow& \I8X16.\VADD\K{\_sat\_u} \\ &&|&
+      \text{i8x16.sub} &\Rightarrow& \I8X16.\VSUB \\ &&|&
+      \text{i8x16.sub\_sat\_s} &\Rightarrow& \I8X16.\VSUB\K{\_sat\_s} \\ &&|&
+      \text{i8x16.sub\_sat\_u} &\Rightarrow& \I8X16.\VSUB\K{\_sat\_u} \\ &&|&
+      \text{i8x16.min\_s} &\Rightarrow& \I8X16.\VMIN\K{\_s} \\ &&|&
+      \text{i8x16.min\_u} &\Rightarrow& \I8X16.\VMIN\K{\_u} \\ &&|&
+      \text{i8x16.max\_s} &\Rightarrow& \I8X16.\VMAX\K{\_s} \\ &&|&
+      \text{i8x16.max\_u} &\Rightarrow& \I8X16.\VMAX\K{\_u} \\ &&|&
+      \text{i8x16.avgr\_u} &\Rightarrow& \I8X16.\AVGR\K{\_u} \\
+    \end{array}
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{i16x8.abs} &\Rightarrow& \I16X8.\VABS \\ &&|&
+      \text{i16x8.neg} &\Rightarrow& \I16X8.\VNEG \\ &&|&
+      \text{i16x8.any\_true} &\Rightarrow& \I16X8.\ANYTRUE \\ &&|&
+      \text{i16x8.all\_true} &\Rightarrow& \I16X8.\ALLTRUE \\ &&|&
+      \text{i16x8.bitmask} &\Rightarrow& \I16X8.\BITMASK \\ &&|&
+      \text{i16x8.narrow\_i32x4\_s} &\Rightarrow& \I16X8.\NARROW\K{\_i32x4\_s} \\ &&|&
+      \text{i16x8.narrow\_i32x4\_u} &\Rightarrow& \I16X8.\NARROW\K{\_i32x4\_u} \\ &&|&
+      \text{i16x8.widen\_low\_i8x16\_s} &\Rightarrow& \I16X8.\WIDEN\K{\_low\_i8x16\_s} \\ &&|&
+      \text{i16x8.widen\_high\_i8x16\_s} &\Rightarrow& \I16X8.\WIDEN\K{\_high\_i8x16\_s} \\ &&|&
+      \text{i16x8.widen\_low\_i8x16\_u} &\Rightarrow& \I16X8.\WIDEN\K{\_low\_i8x16\_u} \\ &&|&
+      \text{i16x8.widen\_high\_i8x16\_u} &\Rightarrow& \I16X8.\WIDEN\K{\_high\_i8x16\_u} \\ &&|&
+      \text{i16x8.shl} &\Rightarrow& \I16X8.\VSHL \\ &&|&
+      \text{i16x8.shr\_s} &\Rightarrow& \I16X8.\VSHR\K{\_s} \\ &&|&
+      \text{i16x8.shr\_u} &\Rightarrow& \I16X8.\VSHR\K{\_u} \\ &&|&
+      \text{i16x8.add} &\Rightarrow& \I16X8.\VADD \\ &&|&
+      \text{i16x8.add\_sat\_s} &\Rightarrow& \I16X8.\VADD\K{\_sat\_s} \\ &&|&
+      \text{i16x8.add\_sat\_u} &\Rightarrow& \I16X8.\VADD\K{\_sat\_u} \\ &&|&
+      \text{i16x8.sub} &\Rightarrow& \I16X8.\VSUB \\ &&|&
+      \text{i16x8.sub\_sat\_s} &\Rightarrow& \I16X8.\VSUB\K{\_sat\_s} \\ &&|&
+      \text{i16x8.sub\_sat\_u} &\Rightarrow& \I16X8.\VSUB\K{\_sat\_u} \\ &&|&
+      \text{i16x8.mul} &\Rightarrow& \I16X8.\VMUL \\ &&|&
+      \text{i16x8.min\_s} &\Rightarrow& \I16X8.\VMIN\K{\_s} \\ &&|&
+      \text{i16x8.min\_u} &\Rightarrow& \I16X8.\VMIN\K{\_u} \\ &&|&
+      \text{i16x8.max\_s} &\Rightarrow& \I16X8.\VMAX\K{\_s} \\ &&|&
+      \text{i16x8.max\_u} &\Rightarrow& \I16X8.\VMAX\K{\_u} \\ &&|&
+      \text{i16x8.avgr\_u} &\Rightarrow& \I16X8.\AVGR\K{\_u} \\
+    \end{array}
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{i32x4.abs} &\Rightarrow& \I32X4.\VABS \\ &&|&
+      \text{i32x4.neg} &\Rightarrow& \I32X4.\VNEG \\ &&|&
+      \text{i32x4.any\_true} &\Rightarrow& \I32X4.\ANYTRUE \\ &&|&
+      \text{i32x4.all\_true} &\Rightarrow& \I32X4.\ALLTRUE \\ &&|&
+      \text{i32x4.bitmask} &\Rightarrow& \I32X4.\BITMASK \\ &&|&
+      \text{i32x4.widen\_low\_i16x8\_s} &\Rightarrow& \I32X4.\WIDEN\K{\_low\_i16x8\_s} \\ &&|&
+      \text{i32x4.widen\_high\_i16x8\_s} &\Rightarrow& \I32X4.\WIDEN\K{\_high\_i16x8\_s} \\ &&|&
+      \text{i32x4.widen\_low\_i16x8\_u} &\Rightarrow& \I32X4.\WIDEN\K{\_low\_i16x8\_u} \\ &&|&
+      \text{i32x4.widen\_high\_i16x8\_u} &\Rightarrow& \I32X4.\WIDEN\K{\_high\_i16x8\_u} \\ &&|&
+      \text{i32x4.shl} &\Rightarrow& \I32X4.\VSHL \\ &&|&
+      \text{i32x4.shr\_s} &\Rightarrow& \I32X4.\VSHR\K{\_s} \\ &&|&
+      \text{i32x4.shr\_u} &\Rightarrow& \I32X4.\VSHR\K{\_u} \\ &&|&
+      \text{i32x4.add} &\Rightarrow& \I32X4.\VADD \\ &&|&
+      \text{i32x4.sub} &\Rightarrow& \I32X4.\VSUB \\ &&|&
+      \text{i32x4.mul} &\Rightarrow& \I32X4.\VMUL \\ &&|&
+      \text{i32x4.min\_s} &\Rightarrow& \I32X4.\VMIN\K{\_s} \\ &&|&
+      \text{i32x4.min\_u} &\Rightarrow& \I32X4.\VMIN\K{\_u} \\ &&|&
+      \text{i32x4.max\_s} &\Rightarrow& \I32X4.\VMAX\K{\_s} \\ &&|&
+      \text{i32x4.max\_u} &\Rightarrow& \I32X4.\VMAX\K{\_u} \\
+    \end{array}
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{i64x2.neg} &\Rightarrow& \I64X2.\VNEG \\ &&|&
+      \text{i64x2.shl} &\Rightarrow& \I64X2.\VSHL \\ &&|&
+      \text{i64x2.shr\_s} &\Rightarrow& \I64X2.\VSHR\K{\_s} \\ &&|&
+      \text{i64x2.shr\_u} &\Rightarrow& \I64X2.\VSHR\K{\_u} \\ &&|&
+      \text{i64x2.add} &\Rightarrow& \I64X2.\VADD \\ &&|&
+      \text{i64x2.sub} &\Rightarrow& \I64X2.\VSUB \\ &&|&
+      \text{i64x2.mul} &\Rightarrow& \I64X2.\VMUL \\
+    \end{array}
+
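+ An illustrative sketch (not from the proposal) combining a saturating addition with a shift; the shift count is a scalar ``i32`` operand::
+
+    (module
+      (func (param $a v128) (param $b v128) (result v128)
+        (i8x16.shl
+          (i8x16.add_sat_s (local.get $a) (local.get $b))
+          (i32.const 1))))
+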
+ .. _text-vfunop:
+ .. _text-vfbinop:
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{f32x4.abs} &\Rightarrow& \F32X4.\VABS \\ &&|&
+      \text{f32x4.neg} &\Rightarrow& \F32X4.\VNEG \\ &&|&
+      \text{f32x4.sqrt} &\Rightarrow& \F32X4.\VSQRT \\ &&|&
+      \text{f32x4.add} &\Rightarrow& \F32X4.\VADD \\ &&|&
+      \text{f32x4.sub} &\Rightarrow& \F32X4.\VSUB \\ &&|&
+      \text{f32x4.mul} &\Rightarrow& \F32X4.\VMUL \\ &&|&
+      \text{f32x4.div} &\Rightarrow& \F32X4.\VDIV \\ &&|&
+      \text{f32x4.min} &\Rightarrow& \F32X4.\VMIN \\ &&|&
+      \text{f32x4.max} &\Rightarrow& \F32X4.\VMAX \\
+    \end{array}
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{f64x2.abs} &\Rightarrow& \F64X2.\VABS \\ &&|&
+      \text{f64x2.neg} &\Rightarrow& \F64X2.\VNEG \\ &&|&
+      \text{f64x2.sqrt} &\Rightarrow& \F64X2.\VSQRT \\ &&|&
+      \text{f64x2.add} &\Rightarrow& \F64X2.\VADD \\ &&|&
+      \text{f64x2.sub} &\Rightarrow& \F64X2.\VSUB \\ &&|&
+      \text{f64x2.mul} &\Rightarrow& \F64X2.\VMUL \\ &&|&
+      \text{f64x2.div} &\Rightarrow& \F64X2.\VDIV \\ &&|&
+      \text{f64x2.min} &\Rightarrow& \F64X2.\VMIN \\ &&|&
+      \text{f64x2.max} &\Rightarrow& \F64X2.\VMAX \\
+    \end{array}
+
+ .. math::
+    \begin{array}{llclll}
+    \phantom{\production{instruction}} & \phantom{\Tplaininstr_I} &\phantom{::=}& \phantom{averylonginstructionnameforsimdtext} && \phantom{simdhasreallylonginstructionnames} \\[-2ex] &&|&
+      \text{i32x4.trunc\_sat\_f32x4\_s} &\Rightarrow& \I32X4.\TRUNC\K{\_sat\_f32x4\_s} \\ &&|&
+      \text{i32x4.trunc\_sat\_f32x4\_u} &\Rightarrow& \I32X4.\TRUNC\K{\_sat\_f32x4\_u} \\ &&|&
+      \text{f32x4.convert\_i32x4\_s} &\Rightarrow& \F32X4.\CONVERT\K{\_i32x4\_s} \\ &&|&
+      \text{f32x4.convert\_i32x4\_u} &\Rightarrow& \F32X4.\CONVERT\K{\_i32x4\_u} \\
+    \end{array}
+
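+ An illustrative sketch (not from the proposal) of lane-wise floating-point arithmetic followed by a saturating conversion to integer lanes::
+
+    (module
+      (func (param $a v128) (param $b v128) (result v128)
+        (i32x4.trunc_sat_f32x4_s
+          (f32x4.mul (local.get $a) (local.get $b)))))
+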
+
.. index:: ! folded instruction, S-expression
.. _text-foldedinstr: