Load a `v128` vector from the given heap address.

```python
def S.load(memarg):
    return S.from_bytes(memory[memarg.offset:memarg.offset + 16])
```

### Load and Splat

* `v8x16.load_splat(memarg) -> v128`

Load a single element and splat to all lanes of a `v128` vector.

```python
def S.load_splat(memarg):
    val_bytes = memory[memarg.offset:memarg.offset + S.LaneBytes]
    return S.splat(S.LaneType.from_bytes(val_bytes))
```

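As a concrete illustration (not part of the specification's pseudocode), the sketch below models `v8x16.load_splat` on a plain Python `bytes` object standing in for linear memory; the helper name `load_splat8` is invented for the example:

```python
# Hypothetical model of v8x16.load_splat: read one byte from memory and
# repeat it in all 16 lanes of the result.
def load_splat8(mem, offset):
    return [mem[offset]] * 16

mem = bytes([0xAB, 0xCD])
print(load_splat8(mem, 0))  # [171, 171, ..., 171]  (0xAB in every lane)
```
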
### Load and Extend

* `i16x8.load8x8_s(memarg) -> v128`: load eight 8-bit integers and sign extend each one to a 16-bit lane

Fetch consecutive integers up to 32 bits wide and produce a vector with lanes up to 64 bits.

```python
def S.load_extend(ext, memarg):
    result = S.New()
    bytes = memory[memarg.offset:memarg.offset + 8]
    for i in range(S.Lanes):
        result[i] = ext(S.LaneType.from_bytes(bytes[(i * S.LaneBytes/2):((i+1) * S.LaneBytes/2)]))
    return result

def S.load_extend_s(memarg):
    return S.load_extend(Sext, memarg)

def S.load_extend_u(memarg):
    return S.load_extend(Zext, memarg)
```

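The source integers are half as wide as the destination lanes, which is why the pseudocode above slices `S.LaneBytes/2` bytes per lane. As a concrete, non-normative illustration, the sketch below mimics `i16x8.load8x8_s` and its unsigned counterpart on a plain byte string; the helper name `load8x8` and the use of `struct` are assumptions made only for the example:

```python
import struct

# Hypothetical model of i16x8.load8x8_s (and the unsigned form): read eight
# consecutive bytes and widen each one to a 16-bit lane.
def load8x8(mem, offset, signed):
    fmt = '8b' if signed else '8B'  # signed vs. unsigned 8-bit sources
    return list(struct.unpack_from(fmt, mem, offset))

mem = bytes([0xFF, 0x01, 0x80, 0x7F, 0x00, 0x02, 0xFE, 0x10])
print(load8x8(mem, 0, signed=True))   # [-1, 1, -128, 127, 0, 2, -2, 16]
print(load8x8(mem, 0, signed=False))  # [255, 1, 128, 127, 0, 2, 254, 16]
```
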
### Store

* `v128.store(memarg, data: v128)`

Store a `v128` vector to the given heap address.

```python
def S.store(memarg, a):
    memory[memarg.offset:memarg.offset + 16] = bytes(a)
```

## Floating-point sign bit operations

These floating point operations are simple manipulations of the sign bit. No