diff --git a/interpreter/binary/decode.ml b/interpreter/binary/decode.ml
index 3e76724e0..e1764be68 100644
--- a/interpreter/binary/decode.ml
+++ b/interpreter/binary/decode.ml
@@ -365,7 +365,11 @@ let simd_prefix s =
   | 0x97l -> i16x8_min_u
   | 0x98l -> i16x8_max_s
   | 0x99l -> i16x8_max_u
+  | 0x9al -> i16x8_extmul_low_i8x16_s
   | 0x9bl -> i16x8_avgr_u
+  | 0x9dl -> i16x8_extmul_high_i8x16_s
+  | 0x9el -> i16x8_extmul_low_i8x16_u
+  | 0x9fl -> i16x8_extmul_high_i8x16_u
   | 0xa0l -> i32x4_abs
   | 0xa1l -> i32x4_neg
   | 0xa3l -> i32x4_all_true
@@ -385,6 +389,10 @@ let simd_prefix s =
   | 0xb8l -> i32x4_max_s
   | 0xb9l -> i32x4_max_u
   | 0xbal -> i32x4_dot_i16x8_s
+  | 0xbbl -> i32x4_extmul_low_i16x8_s
+  | 0xbdl -> i32x4_extmul_high_i16x8_s
+  | 0xbel -> i32x4_extmul_low_i16x8_u
+  | 0xbfl -> i32x4_extmul_high_i16x8_u
   | 0xc0l -> i64x2_eq
   | 0xc1l -> i64x2_neg
   | 0xcbl -> i64x2_shl
@@ -393,6 +401,10 @@ let simd_prefix s =
   | 0xcel -> i64x2_add
   | 0xd0l -> i64x2_ne
   | 0xd1l -> i64x2_sub
+  | 0xd2l -> i64x2_extmul_low_i32x4_s
+  | 0xd3l -> i64x2_extmul_high_i32x4_s
+  | 0xd6l -> i64x2_extmul_low_i32x4_u
+  | 0xd7l -> i64x2_extmul_high_i32x4_u
   | 0xd5l -> i64x2_mul
   | 0xd8l -> f32x4_ceil
   | 0xd9l -> f32x4_floor
diff --git a/interpreter/binary/encode.ml b/interpreter/binary/encode.ml
index 8d2a207f3..662b0bd5a 100644
--- a/interpreter/binary/encode.ml
+++ b/interpreter/binary/encode.ml
@@ -467,6 +467,10 @@ let encode m =
     | Binary (V128 V128Op.(I16x8 MaxS)) -> simd_op 0x98l
     | Binary (V128 V128Op.(I16x8 MaxU)) -> simd_op 0x99l
     | Binary (V128 V128Op.(I16x8 AvgrU)) -> simd_op 0x9bl
+    | Binary (V128 V128Op.(I16x8 ExtMulLowS)) -> simd_op 0x9al
+    | Binary (V128 V128Op.(I16x8 ExtMulHighS)) -> simd_op 0x9dl
+    | Binary (V128 V128Op.(I16x8 ExtMulLowU)) -> simd_op 0x9el
+    | Binary (V128 V128Op.(I16x8 ExtMulHighU)) -> simd_op 0x9fl
    | Binary (V128 V128Op.(I32x4 Add)) -> simd_op 0xael
    | Binary (V128 V128Op.(I32x4 Sub)) -> simd_op 0xb1l
    | Binary (V128 V128Op.(I32x4 MinS)) -> simd_op 0xb6l
@@ -485,11 +489,19 @@ let encode m =
    | Binary (V128 V128Op.(I32x4 LeU)) -> simd_op 0x3el
    | Binary (V128 V128Op.(I32x4 GeS)) -> simd_op 0x3fl
    | Binary (V128 V128Op.(I32x4 GeU)) -> simd_op 0x40l
+    | Binary (V128 V128Op.(I32x4 ExtMulLowS)) -> simd_op 0xbbl
+    | Binary (V128 V128Op.(I32x4 ExtMulHighS)) -> simd_op 0xbdl
+    | Binary (V128 V128Op.(I32x4 ExtMulLowU)) -> simd_op 0xbel
+    | Binary (V128 V128Op.(I32x4 ExtMulHighU)) -> simd_op 0xbfl
    | Binary (V128 V128Op.(I64x2 Add)) -> simd_op 0xcel
    | Binary (V128 V128Op.(I64x2 Sub)) -> simd_op 0xd1l
    | Binary (V128 V128Op.(I64x2 Mul)) -> simd_op 0xd5l
    | Binary (V128 V128Op.(I64x2 Eq)) -> simd_op 0xc0l
    | Binary (V128 V128Op.(I64x2 Ne)) -> simd_op 0xd0l
+    | Binary (V128 V128Op.(I64x2 ExtMulLowS)) -> simd_op 0xd2l
+    | Binary (V128 V128Op.(I64x2 ExtMulHighS)) -> simd_op 0xd3l
+    | Binary (V128 V128Op.(I64x2 ExtMulLowU)) -> simd_op 0xd6l
+    | Binary (V128 V128Op.(I64x2 ExtMulHighU)) -> simd_op 0xd7l
    | Binary (V128 V128Op.(F32x4 Eq)) -> simd_op 0x41l
    | Binary (V128 V128Op.(F32x4 Ne)) -> simd_op 0x42l
    | Binary (V128 V128Op.(F32x4 Lt)) -> simd_op 0x43l
diff --git a/interpreter/exec/eval_simd.ml b/interpreter/exec/eval_simd.ml
index ac3539207..6ab68766f 100644
--- a/interpreter/exec/eval_simd.ml
+++ b/interpreter/exec/eval_simd.ml
@@ -101,6 +101,10 @@ module SimdOp (SXX : Simd.S) (Value : ValueType with type t = SXX.t) = struct
    | I16x8 MaxS -> SXX.I16x8.max_s
    | I16x8 MaxU -> SXX.I16x8.max_u
    | I16x8 AvgrU -> SXX.I16x8.avgr_u
+    | I16x8 ExtMulLowS -> SXX.I16x8_convert.extmul_low_s
+    | I16x8 ExtMulHighS -> SXX.I16x8_convert.extmul_high_s
+    | I16x8 ExtMulLowU -> SXX.I16x8_convert.extmul_low_u
+    | I16x8 ExtMulHighU -> SXX.I16x8_convert.extmul_high_u
    | I32x4 Add -> SXX.I32x4.add
    | I32x4 Sub -> SXX.I32x4.sub
    | I32x4 MinS -> SXX.I32x4.min_s
@@ -121,9 +125,17 @@ module SimdOp (SXX : Simd.S) (Value : ValueType with type t = SXX.t) = struct
    | I32x4 DotI16x8S -> SXX.I32x4_convert.dot_i16x8_s
    | I64x2 Eq -> SXX.I64x2.eq
    | I64x2 Ne -> SXX.I64x2.ne
+    | I32x4 ExtMulLowS -> SXX.I32x4_convert.extmul_low_s
+    | I32x4 ExtMulHighS -> SXX.I32x4_convert.extmul_high_s
+    | I32x4 ExtMulLowU -> SXX.I32x4_convert.extmul_low_u
+    | I32x4 ExtMulHighU -> SXX.I32x4_convert.extmul_high_u
    | I64x2 Add -> SXX.I64x2.add
    | I64x2 Sub -> SXX.I64x2.sub
    | I64x2 Mul -> SXX.I64x2.mul
+    | I64x2 ExtMulLowS -> SXX.I64x2_convert.extmul_low_s
+    | I64x2 ExtMulHighS -> SXX.I64x2_convert.extmul_high_s
+    | I64x2 ExtMulLowU -> SXX.I64x2_convert.extmul_low_u
+    | I64x2 ExtMulHighU -> SXX.I64x2_convert.extmul_high_u
    | F32x4 Eq -> SXX.F32x4.eq
    | F32x4 Ne -> SXX.F32x4.ne
    | F32x4 Lt -> SXX.F32x4.lt
diff --git a/interpreter/exec/simd.ml b/interpreter/exec/simd.ml
index dfbbdbd40..ab46cda8c 100644
--- a/interpreter/exec/simd.ml
+++ b/interpreter/exec/simd.ml
@@ -177,6 +177,10 @@ sig
    val widen_high_s : t -> t
    val widen_low_u : t -> t
    val widen_high_u : t -> t
+    val extmul_low_s : t -> t -> t
+    val extmul_high_s : t -> t -> t
+    val extmul_low_u : t -> t -> t
+    val extmul_high_u : t -> t -> t
  end
  module I32x4_convert : sig
    val trunc_sat_f32x4_s : t -> t
@@ -186,10 +190,20 @@ sig
    val widen_low_u : t -> t
    val widen_high_u : t -> t
    val dot_i16x8_s : t -> t -> t
+    val extmul_low_s : t -> t -> t
+    val extmul_high_s : t -> t -> t
+    val extmul_low_u : t -> t -> t
+    val extmul_high_u : t -> t -> t
  end
  module I64x2_convert : sig
    val widen_low_s : t -> t
+    val widen_high_s : t -> t
    val widen_low_u : t -> t
+    val widen_high_u : t -> t
+    val extmul_low_s : t -> t -> t
+    val extmul_high_s : t -> t -> t
+    val extmul_low_u : t -> t -> t
+    val extmul_high_u : t -> t -> t
  end
  module F32x4_convert : sig
    val convert_i32x4_s : t -> t
@@ -417,6 +431,10 @@ struct
    let widen_low_u = widen Lib.List.take 0xffl
    let widen_high_u = widen Lib.List.drop 0xffl

+    let extmul_low_s x y = I16x8.mul (widen_low_s x) (widen_low_s y)
+    let extmul_high_s x y = I16x8.mul (widen_high_s x) (widen_high_s y)
+    let extmul_low_u x y = I16x8.mul (widen_low_u x) (widen_low_u y)
+    let extmul_high_u x y = I16x8.mul (widen_high_u x) (widen_high_u y)
  end

  module I32x4_convert = struct
@@ -441,16 +459,28 @@ struct
      | [], [] -> []
      | _, _ -> assert false
      in Rep.of_i32x4 (dot xs ys)
+
+    let extmul_low_s x y = I32x4.mul (widen_low_s x) (widen_low_s y)
+    let extmul_high_s x y = I32x4.mul (widen_high_s x) (widen_high_s y)
+    let extmul_low_u x y = I32x4.mul (widen_low_u x) (widen_low_u y)
+    let extmul_high_u x y = I32x4.mul (widen_high_u x) (widen_high_u y)
  end

  module I64x2_convert = struct
-    let widen mask x =
+    let widen take_or_drop mask x =
      Rep.of_i64x2
        (List.map (fun i32 -> Int64.(logand mask (of_int32 i32)))
-          (Lib.List.take 2 (Rep.to_i32x4 x)))
-    let widen_low_s = widen 0xffffffffffffffffL
-    let widen_low_u = widen 0xffffffffL
+          (take_or_drop 2 (Rep.to_i32x4 x)))
+    let widen_low_s = widen Lib.List.take 0xffffffffffffffffL
+    let widen_high_s = widen Lib.List.drop 0xffffffffffffffffL
+    let widen_low_u = widen Lib.List.take 0xffffffffL
+    let widen_high_u = widen Lib.List.drop 0xffffffffL
+
+    let extmul_low_s x y = I64x2.mul (widen_low_s x) (widen_low_s y)
+    let extmul_high_s x y = I64x2.mul (widen_high_s x) (widen_high_s y)
+    let extmul_low_u x y = I64x2.mul (widen_low_u x) (widen_low_u y)
+    let extmul_high_u x y = I64x2.mul (widen_high_u x) (widen_high_u y)
  end

  module F32x4_convert = struct
diff --git a/interpreter/syntax/ast.ml b/interpreter/syntax/ast.ml
index fcd9c22a6..5e2e59b44 100644
--- a/interpreter/syntax/ast.ml
+++ b/interpreter/syntax/ast.ml
@@ -55,6 +55,7 @@ struct
    | Swizzle | Shuffle of int list | NarrowS | NarrowU
    | AddSatS | AddSatU | SubSatS | SubSatU
    | DotI16x8S
+    | ExtMulLowS | ExtMulHighS | ExtMulLowU | ExtMulHighU
  type funop = Abs | Neg | Sqrt | Ceil | Floor | Trunc | Nearest
             | ConvertI32x4S | ConvertI32x4U
diff --git a/interpreter/syntax/operators.ml b/interpreter/syntax/operators.ml
index 09d2ef2a7..936c0f791 100644
--- a/interpreter/syntax/operators.ml
+++ b/interpreter/syntax/operators.ml
@@ -340,6 +340,10 @@ let i16x8_min_u = Binary (V128 V128Op.(I16x8 MinU))
 let i16x8_max_s = Binary (V128 V128Op.(I16x8 MaxS))
 let i16x8_max_u = Binary (V128 V128Op.(I16x8 MaxU))
 let i16x8_avgr_u = Binary (V128 V128Op.(I16x8 AvgrU))
+let i16x8_extmul_low_i8x16_s = Binary (V128 V128Op.(I16x8 ExtMulLowS))
+let i16x8_extmul_high_i8x16_s = Binary (V128 V128Op.(I16x8 ExtMulHighS))
+let i16x8_extmul_low_i8x16_u = Binary (V128 V128Op.(I16x8 ExtMulLowU))
+let i16x8_extmul_high_i8x16_u = Binary (V128 V128Op.(I16x8 ExtMulHighU))

 let i32x4_splat = Convert (V128 V128Op.(I32x4 Splat))
 let i32x4_extract_lane imm = SimdExtract (V128Op.I32x4 (ZX, imm))
@@ -375,6 +379,10 @@ let i32x4_mul = Binary (V128 V128Op.(I32x4 Mul))
 let i32x4_trunc_sat_f32x4_s = Unary (V128 V128Op.(I32x4 TruncSatF32x4S))
 let i32x4_trunc_sat_f32x4_u = Unary (V128 V128Op.(I32x4 TruncSatF32x4U))
 let i32x4_dot_i16x8_s = Binary (V128 V128Op.(I32x4 DotI16x8S))
+let i32x4_extmul_low_i16x8_s = Binary (V128 V128Op.(I32x4 ExtMulLowS))
+let i32x4_extmul_high_i16x8_s = Binary (V128 V128Op.(I32x4 ExtMulHighS))
+let i32x4_extmul_low_i16x8_u = Binary (V128 V128Op.(I32x4 ExtMulLowU))
+let i32x4_extmul_high_i16x8_u = Binary (V128 V128Op.(I32x4 ExtMulHighU))

 let i64x2_splat = Convert (V128 V128Op.(I64x2 Splat))
 let i64x2_extract_lane imm = SimdExtract (V128Op.I64x2 (ZX, imm))
@@ -388,6 +396,10 @@ let i64x2_mul = Binary (V128 V128Op.(I64x2 Mul))
 let i64x2_shl = SimdShift V128Op.(I64x2 Shl)
 let i64x2_shr_s = SimdShift V128Op.(I64x2 ShrS)
 let i64x2_shr_u = SimdShift V128Op.(I64x2 ShrU)
+let i64x2_extmul_low_i32x4_s = Binary (V128 V128Op.(I64x2 ExtMulLowS))
+let i64x2_extmul_high_i32x4_s = Binary (V128 V128Op.(I64x2 ExtMulHighS))
+let i64x2_extmul_low_i32x4_u = Binary (V128 V128Op.(I64x2 ExtMulLowU))
+let i64x2_extmul_high_i32x4_u = Binary (V128 V128Op.(I64x2 ExtMulHighU))

 let f32x4_splat = Convert (V128 V128Op.(F32x4 Splat))
 let f32x4_extract_lane imm = SimdExtract (V128Op.F32x4 (ZX, imm))
diff --git a/interpreter/text/arrange.ml b/interpreter/text/arrange.ml
index 74ed20cbc..cb53033c7 100644
--- a/interpreter/text/arrange.ml
+++ b/interpreter/text/arrange.ml
@@ -298,6 +298,10 @@ struct
    | I16x8 MaxS -> "i16x8.max_s"
    | I16x8 MaxU -> "i16x8.max_u"
    | I16x8 AvgrU -> "i16x8.avgr_u"
+    | I16x8 ExtMulLowS -> "i16x8.extmul_low_i8x16_s"
+    | I16x8 ExtMulHighS -> "i16x8.extmul_high_i8x16_s"
+    | I16x8 ExtMulLowU -> "i16x8.extmul_low_i8x16_u"
+    | I16x8 ExtMulHighU -> "i16x8.extmul_high_i8x16_u"
    | I32x4 Add -> "i32x4.add"
    | I32x4 Sub -> "i32x4.sub"
    | I32x4 Mul -> "i32x4.mul"
@@ -306,9 +310,17 @@ struct
    | I32x4 MinS -> "i32x4.min_s"
    | I32x4 MinU -> "i32x4.min_u"
    | I32x4 MaxS -> "i32x4.max_s"
    | I32x4 MaxU -> "i32x4.max_u"
    | I32x4 DotI16x8S -> "i32x4.dot_i16x8_s"
+    | I32x4 ExtMulLowS -> "i32x4.extmul_low_i16x8_s"
+    | I32x4 ExtMulHighS -> "i32x4.extmul_high_i16x8_s"
+    | I32x4 ExtMulLowU -> "i32x4.extmul_low_i16x8_u"
+    | I32x4 ExtMulHighU -> "i32x4.extmul_high_i16x8_u"
    | I64x2 Add -> "i64x2.add"
    | I64x2 Sub -> "i64x2.sub"
    | I64x2 Mul -> "i64x2.mul"
+    | I64x2 ExtMulLowS -> "i64x2.extmul_low_i32x4_s"
+    | I64x2 ExtMulHighS -> "i64x2.extmul_high_i32x4_s"
+    | I64x2 ExtMulLowU -> "i64x2.extmul_low_i32x4_u"
+    | I64x2 ExtMulHighU -> "i64x2.extmul_high_i32x4_u"
    | F32x4 Eq -> "f32x4.eq"
    | F32x4 Ne -> "f32x4.ne"
    | F32x4 Lt -> "f32x4.lt"
diff --git a/interpreter/text/lexer.mll b/interpreter/text/lexer.mll
index 0c59eba51..9058de503 100644
--- a/interpreter/text/lexer.mll
+++ b/interpreter/text/lexer.mll
@@ -576,6 +576,19 @@ rule token = parse
  | "i32x4.dot_i16x8_s"
    { BINARY i32x4_dot_i16x8_s }

+  | "i16x8.extmul_low_i8x16_"(sign as s)
+    { BINARY (ext s i16x8_extmul_low_i8x16_s i16x8_extmul_low_i8x16_u) }
+  | "i16x8.extmul_high_i8x16_"(sign as s)
+    { BINARY (ext s i16x8_extmul_high_i8x16_s i16x8_extmul_high_i8x16_u) }
+  | "i32x4.extmul_low_i16x8_"(sign as s)
+    { BINARY (ext s i32x4_extmul_low_i16x8_s i32x4_extmul_low_i16x8_u) }
+  | "i32x4.extmul_high_i16x8_"(sign as s)
+    { BINARY (ext s i32x4_extmul_high_i16x8_s i32x4_extmul_high_i16x8_u) }
+  | "i64x2.extmul_low_i32x4_"(sign as s)
+    { BINARY (ext s i64x2_extmul_low_i32x4_s i64x2_extmul_low_i32x4_u) }
+  | "i64x2.extmul_high_i32x4_"(sign as s)
+    { BINARY (ext s i64x2_extmul_high_i32x4_s i64x2_extmul_high_i32x4_u) }
+
  | (simd_shape as s) { SIMD_SHAPE (simd_shape s) }
  | name as s { VAR s }
diff --git a/test/core/simd/meta/gen_tests.py b/test/core/simd/meta/gen_tests.py
index 3508df113..f9a167fea 100644
--- a/test/core/simd/meta/gen_tests.py
+++ b/test/core/simd/meta/gen_tests.py
@@ -33,6 +33,7 @@
    'simd_f64x2_pmin_pmax',
    'simd_i32x4_dot_i16x8',
    'simd_load_lane',
+    'simd_ext_mul',
    )
diff --git a/test/core/simd/meta/simd_arithmetic.py b/test/core/simd/meta/simd_arithmetic.py
index d4214c10e..53d92d290 100644
--- a/test/core/simd/meta/simd_arithmetic.py
+++ b/test/core/simd/meta/simd_arithmetic.py
@@ -35,14 +35,27 @@ def __str__(self):
     def lane(self):
         return self.LANE_VALUE.get(self.LANE_TYPE)

+    @property
+    def dst_lane(self):
+        return self.lane
+
+    @property
+    def src_lane(self):
+        # Used for arithmetic that extends the lane, e.g. extended
+        # multiplication of i16x8 lanes produces i32x4 lanes.
+        if hasattr(self, 'SRC_LANE_TYPE'):
+            return self.LANE_VALUE.get(self.SRC_LANE_TYPE)
+        else:
+            return self.lane
+
     @property
     def normal_unary_op_test_data(self):
-        lane = self.lane
+        lane = self.src_lane
         return [0, 1, -1, lane.max - 1, lane.min + 1, lane.min, lane.max, lane.mask]

     @property
     def normal_binary_op_test_data(self):
-        lane = self.lane
+        lane = self.src_lane
         return [
             (0, 0),
             (0, 1),
@@ -170,7 +183,7 @@ def get_case_data(self):
         for data_group, v128_forms in self.bin_test_data:
             for data in data_group:
                 case_data.append([op_name, [str(data[0]), str(data[1])],
-                    str(o.binary_op(data[0], data[1], self.lane)),
+                    str(o.binary_op(data[0], data[1], self.src_lane, self.dst_lane)),
                     v128_forms])
         for data_group in self.full_bin_test_data:
             for data in data_group.get(op_name):
@@ -183,7 +196,7 @@ def get_case_data(self):
         for data_group, v128_forms in self.unary_test_data:
             for data in data_group:
                 case_data.append([op_name, [str(data)],
-                    str(o.unary_op(data, self.lane)),
+                    str(o.unary_op(data, self.dst_lane)),
                     v128_forms])

         return case_data
diff --git a/test/core/simd/meta/simd_ext_mul.py b/test/core/simd/meta/simd_ext_mul.py
new file mode 100644
index 000000000..30a4082f2
--- /dev/null
+++ b/test/core/simd/meta/simd_ext_mul.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+
+""" Base class for generating extended multiply instructions. These
+instructions take 2 inputs of the same (narrower) lane shape, multiply
+corresponding lanes with extension (no overflow/wraparound), and produce 1
+output of a (wider) shape. These instructions can choose to work on the low
+or high halves of the inputs, and perform signed or unsigned multiply.
+
+Subclasses need to define 3 attributes:
+  - LANE_TYPE (this is the output shape)
+  - SRC_LANE_TYPE (this is the input (narrower) shape)
+  - BINARY_OPS (list of operations)
+"""
+
+from simd_arithmetic import SimdArithmeticCase
+
+
+class SimdExtMulCase(SimdArithmeticCase):
+    UNARY_OPS = ()
+
+    @property
+    def full_bin_test_data(self):
+        return []
+
+    def get_combine_cases(self):
+        return ''
+
+    @property
+    def bin_test_data(self):
+        lane_forms = [self.SRC_LANE_TYPE, self.SRC_LANE_TYPE, self.LANE_TYPE]
+        return [(self.normal_binary_op_test_data, lane_forms)]
+
+    @property
+    def hex_binary_op_test_data(self):
+        return []
+
+    def gen_test_cases(self):
+        wast_filename = '../simd_{wide}_extmul_{narrow}.wast'.format(
+            wide=self.LANE_TYPE, narrow=self.SRC_LANE_TYPE)
+        with open(wast_filename, 'w') as fp:
+            fp.write(self.get_all_cases())
+
+
+class SimdI16x8ExtMulCase(SimdExtMulCase):
+    LANE_TYPE = 'i16x8'
+    SRC_LANE_TYPE = 'i8x16'
+    BINARY_OPS = ('extmul_low_i8x16_s', 'extmul_high_i8x16_s',
+                  'extmul_low_i8x16_u', 'extmul_high_i8x16_u')
+
+
+class SimdI32x4ExtMulCase(SimdExtMulCase):
+    LANE_TYPE = 'i32x4'
+    SRC_LANE_TYPE = 'i16x8'
+    BINARY_OPS = ('extmul_low_i16x8_s', 'extmul_high_i16x8_s',
+                  'extmul_low_i16x8_u', 'extmul_high_i16x8_u')
+
+
+class SimdI64x2ExtMulCase(SimdExtMulCase):
+    LANE_TYPE = 'i64x2'
+    SRC_LANE_TYPE = 'i32x4'
+    BINARY_OPS = ('extmul_low_i32x4_s', 'extmul_high_i32x4_s',
+                  'extmul_low_i32x4_u', 'extmul_high_i32x4_u')
+
+
+def gen_test_cases():
+    simd_i16x8_ext_mul_case = SimdI16x8ExtMulCase()
+    simd_i16x8_ext_mul_case.gen_test_cases()
+    simd_i32x4_ext_mul_case = SimdI32x4ExtMulCase()
+    simd_i32x4_ext_mul_case.gen_test_cases()
+    simd_i64x2_ext_mul_case = SimdI64x2ExtMulCase()
+    simd_i64x2_ext_mul_case.gen_test_cases()
+
+
+if __name__ == '__main__':
+    gen_test_cases()
diff --git a/test/core/simd/meta/simd_integer_op.py b/test/core/simd/meta/simd_integer_op.py
index ec7fede83..67bf207eb 100644
--- a/test/core/simd/meta/simd_integer_op.py
+++ b/test/core/simd/meta/simd_integer_op.py
@@ -15,6 +15,7 @@ class ArithmeticOp:
        add_sat_s, add_sat_u, sub_sat_s, sub_sat_u,
        min_s, min_u, max_s, max_u,
        avgr_u, abs
+        extmul_*_s, extmul_*_u
    """
    def __init__(self, op: str):
        self.op = op
@@ -121,7 +122,7 @@ def unary_op(self, operand, lane):

        return str(result)

-    def binary_op(self, operand1, operand2, lane):
+    def binary_op(self, operand1, operand2, src_lane, dst_lane=None):
        """General integer arithmetic and saturating arithmetic operations
        with 2 operands.

@@ -130,12 +131,16 @@ def binary_op(self, operand1, operand2, src_lane, dst_lane=None):
            add_sat_s, add_sat_u, sub_sat_s, sub_sat_u,
            min_s, min_u, max_s, max_u,
            avgr_u
+            extmul_*_s, extmul_*_u (same as mul, on the extended lanes)
        :param operand1: the operand 1, integer or literal string in hex or decimal format
        :param operand2: the operand 2, integer or literal string in hex or decimal format
-        :param lane: the LaneValue instance of a lane in v128
+        :param src_lane: the LaneValue instance of a source lane in v128
+        :param dst_lane: the LaneValue instance of the (wider) result lane, defaults to src_lane
        :return: the string of the result in hex or decimal format
        """
+        if not dst_lane:
+            dst_lane = src_lane
        v1 = operand1
        v2 = operand2
        base1 = base2 = 10
@@ -155,27 +159,35 @@ def binary_op(self, operand1, operand2, src_lane, dst_lane=None):
            value = v1 - v2
        elif self.op == 'mul':
            value = v1 * v2
+        elif self.op.startswith('extmul_'):
+            if self.op.endswith('s'):
+                i1 = self.get_valid_value(v1, src_lane)
+                i2 = self.get_valid_value(v2, src_lane)
+            else:
+                i1 = self.get_valid_value(v1, src_lane, signed=False)
+                i2 = self.get_valid_value(v2, src_lane, signed=False)
+            value = i1 * i2
        elif 'sat' in self.op:
-            value = self._saturate(v1, v2, lane)
+            value = self._saturate(v1, v2, src_lane)
            if self.op.endswith('_u'):
                result_signed = False
        elif self.op in ['min_s', 'max_s']:
-            i1 = self.get_valid_value(v1, lane)
-            i2 = self.get_valid_value(v2, lane)
+            i1 = self.get_valid_value(v1, src_lane)
+            i2 = self.get_valid_value(v2, src_lane)
            if self.op == 'min_s':
                return operand1 if i1 <= i2 else operand2
            else:
                return operand1 if i1 >= i2 else operand2
        elif self.op in ['min_u', 'max_u']:
-            i1 = self.get_valid_value(v1, lane, signed=False)
-            i2 = self.get_valid_value(v2, lane, signed=False)
+            i1 = self.get_valid_value(v1, src_lane, signed=False)
+            i2 = self.get_valid_value(v2, src_lane, signed=False)
            if self.op == 'min_u':
                return operand1 if i1 <= i2 else operand2
            else:
                return operand1 if i1 >= i2 else operand2
        elif self.op == 'avgr_u':
-            i1 = self.get_valid_value(v1, lane, signed=False)
-            i2 = self.get_valid_value(v2, lane, signed=False)
+            i1 = self.get_valid_value(v1, src_lane, signed=False)
+            i2 = self.get_valid_value(v2, src_lane, signed=False)
            result = (i1 + i2 + 1) // 2
            if base1 == 16 or base2 == 16:
                return hex(result)
@@ -184,5 +196,5 @@ def binary_op(self, operand1, operand2, src_lane, dst_lane=None):
        else:
            raise Exception('Unknown binary operation')

-        result = self.get_valid_value(value, lane, signed=result_signed)
+        result = self.get_valid_value(value, dst_lane, signed=result_signed)
        return str(result)
diff --git a/test/core/simd/simd_i16x8_extmul_i8x16.wast b/test/core/simd/simd_i16x8_extmul_i8x16.wast
new file mode 100644
index 000000000..cc8cf8f40
--- /dev/null
+++ b/test/core/simd/simd_i16x8_extmul_i8x16.wast
@@ -0,0 +1,404 @@
+;; Tests for i16x8 arithmetic operations on major boundary values and all special values.
+ + +(module + (func (export "i16x8.extmul_low_i8x16_s") (param v128 v128) (result v128) (i16x8.extmul_low_i8x16_s (local.get 0) (local.get 1))) + (func (export "i16x8.extmul_high_i8x16_s") (param v128 v128) (result v128) (i16x8.extmul_high_i8x16_s (local.get 0) (local.get 1))) + (func (export "i16x8.extmul_low_i8x16_u") (param v128 v128) (result v128) (i16x8.extmul_low_i8x16_u (local.get 0) (local.get 1))) + (func (export "i16x8.extmul_high_i8x16_u") (param v128 v128) (result v128) (i16x8.extmul_high_i8x16_u (local.get 0) (local.get 1))) +) + + +;; i16x8.extmul_low_i8x16_s +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0) + (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 1 1 1 1 1 1 1 1)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 1 1 1 1 1 1 1 1)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 63 63 63 63 63 63 63 63 63 63 63 63 63 63 63 63) + (v128.const i8x16 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64)) + (v128.const i16x8 4032 4032 4032 4032 4032 4032 4032 4032)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64) + (v128.const i8x16 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64)) + (v128.const i16x8 4096 4096 4096 4096 4096 4096 4096 4096)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63) + (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64)) + (v128.const i16x8 4032 4032 4032 4032 4032 4032 4032 4032)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64) + (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64)) + (v128.const i16x8 4096 4096 4096 4096 4096 4096 4096 4096)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65) + (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64)) + (v128.const i16x8 4160 4160 4160 4160 4160 4160 4160 4160)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 125 125 125 125 125 125 125 125 125 125 125 125 125 125 125 125) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 125 125 125 125 125 125 125 125)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 126 126 126 126 126 126 126 126 126 126 126 126 126 126 126 126) + (v128.const 
i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 126 126 126 126 126 126 126 126)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 -128 -128 -128 -128 -128 -128 -128 -128)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 126 126 126 126 126 126 126 126)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 127 127 127 127 127 127 127 127)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 128 128 128 128 128 128 128 128)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127) + (v128.const i8x16 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127)) + (v128.const i16x8 16129 16129 16129 16129 16129 16129 16129 16129)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128) + (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128)) + (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128) + (v128.const i8x16 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127)) + (v128.const i16x8 16256 16256 16256 16256 16256 16256 16256 16256)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 1 1 1 1 1 1 1 1)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127)) + (v128.const i16x8 -127 -127 -127 -127 -127 -127 -127 -127)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128)) + (v128.const i16x8 128 128 128 128 128 128 128 128)) +(assert_return (invoke "i16x8.extmul_low_i8x16_s" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 255 255 255 255 255 255 
255 255 255 255 255 255 255 255 255 255)) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + +;; i16x8.extmul_high_i8x16_s +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0) + (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 1 1 1 1 1 1 1 1)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 1 1 1 1 1 1 1 1)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 63 63 63 63 63 63 63 63 63 63 63 63 63 63 63 63) + (v128.const i8x16 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64)) + (v128.const i16x8 4032 4032 4032 4032 4032 4032 4032 4032)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64) + (v128.const i8x16 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64)) + (v128.const i16x8 4096 4096 4096 4096 4096 4096 4096 4096)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63) + (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64)) + (v128.const i16x8 4032 4032 4032 4032 4032 4032 4032 4032)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64) + (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64)) + (v128.const i16x8 4096 4096 4096 4096 4096 4096 4096 4096)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65) + (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64)) + (v128.const i16x8 4160 4160 4160 4160 4160 4160 4160 4160)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 125 125 125 125 125 125 125 125 125 125 125 125 125 125 125 125) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 125 125 125 125 125 125 125 125)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 126 126 126 126 126 126 126 126 126 126 126 126 126 126 126 126) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 126 126 126 126 126 126 126 126)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 -128 -128 -128 -128 -128 -128 -128 -128)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 -126 -126 -126 -126 -126 -126 -126 -126 
-126 -126 -126 -126 -126 -126 -126 -126) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 126 126 126 126 126 126 126 126)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 127 127 127 127 127 127 127 127)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 128 128 128 128 128 128 128 128)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127) + (v128.const i8x16 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127)) + (v128.const i16x8 16129 16129 16129 16129 16129 16129 16129 16129)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128) + (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128)) + (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128) + (v128.const i8x16 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127)) + (v128.const i16x8 16256 16256 16256 16256 16256 16256 16256 16256)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 1 1 1 1 1 1 1 1)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127)) + (v128.const i16x8 -127 -127 -127 -127 -127 -127 -127 -127)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128)) + (v128.const i16x8 128 128 128 128 128 128 128 128)) +(assert_return (invoke "i16x8.extmul_high_i8x16_s" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255)) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + +;; i16x8.extmul_low_i8x16_u +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0) + (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 1 1 1 1 1 1 1 1)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 255 255 255 255 255 255 255 255)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 -511 -511 -511 -511 -511 -511 -511 -511)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 63 63 63 63 63 63 63 63 63 63 63 63 63 63 63 63) + (v128.const i8x16 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64)) + (v128.const i16x8 4032 4032 4032 4032 4032 4032 4032 4032)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64) + (v128.const i8x16 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64)) + (v128.const i16x8 4096 4096 4096 4096 4096 4096 4096 4096)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63) + (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64)) + (v128.const i16x8 -28480 -28480 -28480 -28480 -28480 -28480 -28480 -28480)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64) + (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64)) + (v128.const i16x8 -28672 -28672 -28672 -28672 -28672 -28672 -28672 -28672)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65) + (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64)) + (v128.const i16x8 -28864 -28864 -28864 -28864 -28864 -28864 -28864 -28864)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 125 125 125 125 125 125 125 125 125 125 125 125 125 125 125 125) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 125 125 125 125 125 125 125 125)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 126 126 126 126 126 126 126 126 126 126 126 126 126 126 126 126) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 126 126 126 126 126 126 126 126)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 128 128 128 128 128 128 128 128)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 -32386 -32386 -32386 -32386 -32386 -32386 -32386 -32386)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 
-1 -1 -1 -1 -1)) + (v128.const i16x8 -32641 -32641 -32641 -32641 -32641 -32641 -32641 -32641)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 32640 32640 32640 32640 32640 32640 32640 32640)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127) + (v128.const i8x16 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127)) + (v128.const i16x8 16129 16129 16129 16129 16129 16129 16129 16129)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128) + (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128)) + (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128) + (v128.const i8x16 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127)) + (v128.const i16x8 16512 16512 16512 16512 16512 16512 16512 16512)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 255 255 255 255 255 255 255 255)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 -511 -511 -511 -511 -511 -511 -511 -511)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127)) + (v128.const i16x8 32385 32385 32385 32385 32385 32385 32385 32385)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128)) + (v128.const i16x8 32640 32640 32640 32640 32640 32640 32640 32640)) +(assert_return (invoke "i16x8.extmul_low_i8x16_u" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255)) + (v128.const i16x8 -511 -511 -511 -511 -511 -511 -511 -511)) + +;; i16x8.extmul_high_i8x16_u +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0) + (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 1 1 1 1 1 1 1 1)) 
+(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 255 255 255 255 255 255 255 255)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 -511 -511 -511 -511 -511 -511 -511 -511)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 63 63 63 63 63 63 63 63 63 63 63 63 63 63 63 63) + (v128.const i8x16 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64)) + (v128.const i16x8 4032 4032 4032 4032 4032 4032 4032 4032)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64) + (v128.const i8x16 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64)) + (v128.const i16x8 4096 4096 4096 4096 4096 4096 4096 4096)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63 -63) + (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64)) + (v128.const i16x8 -28480 -28480 -28480 -28480 -28480 -28480 -28480 -28480)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64) + (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64)) + (v128.const i16x8 -28672 -28672 -28672 -28672 -28672 -28672 -28672 -28672)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65 -65) + (v128.const i8x16 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64 -64)) + (v128.const i16x8 -28864 -28864 -28864 -28864 -28864 -28864 -28864 -28864)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 125 125 125 125 125 125 125 125 125 125 125 125 125 125 125 125) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 125 125 125 125 125 125 125 125)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 126 126 126 126 126 126 126 126 126 126 126 126 126 126 126 126) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 126 126 126 126 126 126 126 126)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 128 128 128 128 128 128 128 128)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126 -126) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 -32386 -32386 -32386 -32386 -32386 -32386 -32386 -32386)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 -32641 -32641 -32641 -32641 -32641 -32641 -32641 -32641)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 
-128 -128) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 32640 32640 32640 32640 32640 32640 32640 32640)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127) + (v128.const i8x16 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127)) + (v128.const i16x8 16129 16129 16129 16129 16129 16129 16129 16129)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128) + (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128)) + (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128) + (v128.const i8x16 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127 -127)) + (v128.const i16x8 16512 16512 16512 16512 16512 16512 16512 16512)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)) + (v128.const i16x8 0 0 0 0 0 0 0 0)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) + (v128.const i16x8 255 255 255 255 255 255 255 255)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i16x8 -511 -511 -511 -511 -511 -511 -511 -511)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127)) + (v128.const i16x8 32385 32385 32385 32385 32385 32385 32385 32385)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128 -128)) + (v128.const i16x8 32640 32640 32640 32640 32640 32640 32640 32640)) +(assert_return (invoke "i16x8.extmul_high_i8x16_u" (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255) + (v128.const i8x16 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255)) + (v128.const i16x8 -511 -511 -511 -511 -511 -511 -511 -511)) + +;; type check +(assert_invalid (module (func (result v128) (i16x8.extmul_low_i8x16_s (i32.const 0) (f32.const 0.0)))) "type mismatch") +(assert_invalid (module (func (result v128) (i16x8.extmul_high_i8x16_s (i32.const 0) (f32.const 0.0)))) "type mismatch") +(assert_invalid (module (func (result v128) (i16x8.extmul_low_i8x16_u (i32.const 0) (f32.const 0.0)))) "type mismatch") +(assert_invalid (module (func (result v128) (i16x8.extmul_high_i8x16_u (i32.const 0) (f32.const 0.0)))) "type mismatch") + +;; Test operation with empty argument + +(assert_invalid + (module + (func $i16x8.extmul_low_i8x16_s-1st-arg-empty (result v128) + (i16x8.extmul_low_i8x16_s (v128.const i16x8 0 0 0 0 0 0 0 0)) + ) + ) + "type mismatch" +) +(assert_invalid + (module + (func $i16x8.extmul_low_i8x16_s-arg-empty (result v128) + (i16x8.extmul_low_i8x16_s) + ) + 
) + "type mismatch" +) +(assert_invalid + (module + (func $i16x8.extmul_high_i8x16_s-1st-arg-empty (result v128) + (i16x8.extmul_high_i8x16_s (v128.const i16x8 0 0 0 0 0 0 0 0)) + ) + ) + "type mismatch" +) +(assert_invalid + (module + (func $i16x8.extmul_high_i8x16_s-arg-empty (result v128) + (i16x8.extmul_high_i8x16_s) + ) + ) + "type mismatch" +) +(assert_invalid + (module + (func $i16x8.extmul_low_i8x16_u-1st-arg-empty (result v128) + (i16x8.extmul_low_i8x16_u (v128.const i16x8 0 0 0 0 0 0 0 0)) + ) + ) + "type mismatch" +) +(assert_invalid + (module + (func $i16x8.extmul_low_i8x16_u-arg-empty (result v128) + (i16x8.extmul_low_i8x16_u) + ) + ) + "type mismatch" +) +(assert_invalid + (module + (func $i16x8.extmul_high_i8x16_u-1st-arg-empty (result v128) + (i16x8.extmul_high_i8x16_u (v128.const i16x8 0 0 0 0 0 0 0 0)) + ) + ) + "type mismatch" +) +(assert_invalid + (module + (func $i16x8.extmul_high_i8x16_u-arg-empty (result v128) + (i16x8.extmul_high_i8x16_u) + ) + ) + "type mismatch" +) + diff --git a/test/core/simd/simd_i32x4_extmul_i16x8.wast b/test/core/simd/simd_i32x4_extmul_i16x8.wast new file mode 100644 index 000000000..f04db6770 --- /dev/null +++ b/test/core/simd/simd_i32x4_extmul_i16x8.wast @@ -0,0 +1,404 @@ +;; Tests for i32x4 arithmetic operations on major boundary values and all special values. + + +(module + (func (export "i32x4.extmul_low_i16x8_s") (param v128 v128) (result v128) (i32x4.extmul_low_i16x8_s (local.get 0) (local.get 1))) + (func (export "i32x4.extmul_high_i16x8_s") (param v128 v128) (result v128) (i32x4.extmul_high_i16x8_s (local.get 0) (local.get 1))) + (func (export "i32x4.extmul_low_i16x8_u") (param v128 v128) (result v128) (i32x4.extmul_low_i16x8_u (local.get 0) (local.get 1))) + (func (export "i32x4.extmul_high_i16x8_u") (param v128 v128) (result v128) (i32x4.extmul_high_i16x8_u (local.get 0) (local.get 1))) +) + + +;; i32x4.extmul_low_i16x8_s +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 0 0 0 0 0 0 0 0) + (v128.const i16x8 0 0 0 0 0 0 0 0)) + (v128.const i32x4 0 0 0 0)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 0 0 0 0 0 0 0 0) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + (v128.const i32x4 0 0 0 0)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 1 1 1 1 1 1 1 1) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + (v128.const i32x4 1 1 1 1)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 0 0 0 0 0 0 0 0) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 0 0 0 0)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 1 1 1 1 1 1 1 1) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 -1 -1 -1 -1)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 1 1 1 1)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 16383 16383 16383 16383 16383 16383 16383 16383) + (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384)) + (v128.const i32x4 268419072 268419072 268419072 268419072)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384) + (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384)) + (v128.const i32x4 268435456 268435456 268435456 268435456)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 -16383 -16383 -16383 -16383 -16383 -16383 -16383 -16383) + (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 
-16384 -16384 -16384)) + (v128.const i32x4 268419072 268419072 268419072 268419072)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384) + (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384)) + (v128.const i32x4 268435456 268435456 268435456 268435456)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 -16385 -16385 -16385 -16385 -16385 -16385 -16385 -16385) + (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384)) + (v128.const i32x4 268451840 268451840 268451840 268451840)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 32765 32765 32765 32765 32765 32765 32765 32765) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + (v128.const i32x4 32765 32765 32765 32765)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 32766 32766 32766 32766 32766 32766 32766 32766) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + (v128.const i32x4 32766 32766 32766 32766)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 32768 32768 32768 32768 32768 32768 32768 32768) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + (v128.const i32x4 -32768 -32768 -32768 -32768)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 -32766 -32766 -32766 -32766 -32766 -32766 -32766 -32766) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 32766 32766 32766 32766)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 -32767 -32767 -32767 -32767 -32767 -32767 -32767 -32767) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 32767 32767 32767 32767)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 32768 32768 32768 32768)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 32767 32767 32767 32767 32767 32767 32767 32767) + (v128.const i16x8 32767 32767 32767 32767 32767 32767 32767 32767)) + (v128.const i32x4 1073676289 1073676289 1073676289 1073676289)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768) + (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768)) + (v128.const i32x4 1073741824 1073741824 1073741824 1073741824)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768) + (v128.const i16x8 -32767 -32767 -32767 -32767 -32767 -32767 -32767 -32767)) + (v128.const i32x4 1073709056 1073709056 1073709056 1073709056)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535) + (v128.const i16x8 0 0 0 0 0 0 0 0)) + (v128.const i32x4 0 0 0 0)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + (v128.const i32x4 -1 -1 -1 -1)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 1 1 1 1)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535) + (v128.const i16x8 32767 32767 32767 32767 32767 32767 32767 32767)) + (v128.const i32x4 -32767 -32767 -32767 -32767)) +(assert_return (invoke "i32x4.extmul_low_i16x8_s" (v128.const i16x8 
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 0 0 0 0 0 0 0 0)
+ (v128.const i16x8 0 0 0 0 0 0 0 0))
+ (v128.const i32x4 0 0 0 0))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 0 0 0 0 0 0 0 0)
+ (v128.const i16x8 1 1 1 1 1 1 1 1))
+ (v128.const i32x4 0 0 0 0))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 1 1 1 1 1 1 1 1)
+ (v128.const i16x8 1 1 1 1 1 1 1 1))
+ (v128.const i32x4 1 1 1 1))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 0 0 0 0 0 0 0 0)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 0 0 0 0))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 1 1 1 1 1 1 1 1)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 -1 -1 -1 -1))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 1 1 1 1))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 16383 16383 16383 16383 16383 16383 16383 16383)
+ (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384))
+ (v128.const i32x4 268419072 268419072 268419072 268419072))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384)
+ (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384))
+ (v128.const i32x4 268435456 268435456 268435456 268435456))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 -16383 -16383 -16383 -16383 -16383 -16383 -16383 -16383)
+ (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384))
+ (v128.const i32x4 268419072 268419072 268419072 268419072))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384)
+ (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384))
+ (v128.const i32x4 268435456 268435456 268435456 268435456))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 -16385 -16385 -16385 -16385 -16385 -16385 -16385 -16385)
+ (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384))
+ (v128.const i32x4 268451840 268451840 268451840 268451840))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 32765 32765 32765 32765 32765 32765 32765 32765)
+ (v128.const i16x8 1 1 1 1 1 1 1 1))
+ (v128.const i32x4 32765 32765 32765 32765))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 32766 32766 32766 32766 32766 32766 32766 32766)
+ (v128.const i16x8 1 1 1 1 1 1 1 1))
+ (v128.const i32x4 32766 32766 32766 32766))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 32768 32768 32768 32768 32768 32768 32768 32768)
+ (v128.const i16x8 1 1 1 1 1 1 1 1))
+ (v128.const i32x4 -32768 -32768 -32768 -32768))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 -32766 -32766 -32766 -32766 -32766 -32766 -32766 -32766)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 32766 32766 32766 32766))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 -32767 -32767 -32767 -32767 -32767 -32767 -32767 -32767)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 32767 32767 32767 32767))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 32768 32768 32768 32768))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 32767 32767 32767 32767 32767 32767 32767 32767)
+ (v128.const i16x8 32767 32767 32767 32767 32767 32767 32767 32767))
+ (v128.const i32x4 1073676289 1073676289 1073676289 1073676289))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768)
+ (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768))
+ (v128.const i32x4 1073741824 1073741824 1073741824 1073741824))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768)
+ (v128.const i16x8 -32767 -32767 -32767 -32767 -32767 -32767 -32767 -32767))
+ (v128.const i32x4 1073709056 1073709056 1073709056 1073709056))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535)
+ (v128.const i16x8 0 0 0 0 0 0 0 0))
+ (v128.const i32x4 0 0 0 0))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535)
+ (v128.const i16x8 1 1 1 1 1 1 1 1))
+ (v128.const i32x4 -1 -1 -1 -1))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 1 1 1 1))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535)
+ (v128.const i16x8 32767 32767 32767 32767 32767 32767 32767 32767))
+ (v128.const i32x4 -32767 -32767 -32767 -32767))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535)
+ (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768))
+ (v128.const i32x4 32768 32768 32768 32768))
+(assert_return (invoke "i32x4.extmul_high_i16x8_s" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535)
+ (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535))
+ (v128.const i32x4 1 1 1 1))
+
+;; i32x4.extmul_low_i16x8_u
"i32x4.extmul_low_i16x8_u" (v128.const i16x8 16383 16383 16383 16383 16383 16383 16383 16383) + (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384)) + (v128.const i32x4 268419072 268419072 268419072 268419072)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384) + (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384)) + (v128.const i32x4 268435456 268435456 268435456 268435456)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 -16383 -16383 -16383 -16383 -16383 -16383 -16383 -16383) + (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384)) + (v128.const i32x4 -1878999040 -1878999040 -1878999040 -1878999040)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384) + (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384)) + (v128.const i32x4 -1879048192 -1879048192 -1879048192 -1879048192)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 -16385 -16385 -16385 -16385 -16385 -16385 -16385 -16385) + (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384)) + (v128.const i32x4 -1879097344 -1879097344 -1879097344 -1879097344)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 32765 32765 32765 32765 32765 32765 32765 32765) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + (v128.const i32x4 32765 32765 32765 32765)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 32766 32766 32766 32766 32766 32766 32766 32766) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + (v128.const i32x4 32766 32766 32766 32766)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 32768 32768 32768 32768 32768 32768 32768 32768) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + (v128.const i32x4 32768 32768 32768 32768)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 -32766 -32766 -32766 -32766 -32766 -32766 -32766 -32766) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 -2147385346 -2147385346 -2147385346 -2147385346)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 -32767 -32767 -32767 -32767 -32767 -32767 -32767 -32767) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 -2147450881 -2147450881 -2147450881 -2147450881)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 2147450880 2147450880 2147450880 2147450880)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 32767 32767 32767 32767 32767 32767 32767 32767) + (v128.const i16x8 32767 32767 32767 32767 32767 32767 32767 32767)) + (v128.const i32x4 1073676289 1073676289 1073676289 1073676289)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768) + (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768)) + (v128.const i32x4 1073741824 1073741824 1073741824 1073741824)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768) + (v128.const i16x8 -32767 -32767 -32767 -32767 -32767 -32767 -32767 -32767)) + (v128.const i32x4 1073774592 1073774592 1073774592 1073774592)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535) + (v128.const 
i16x8 0 0 0 0 0 0 0 0)) + (v128.const i32x4 0 0 0 0)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + (v128.const i32x4 65535 65535 65535 65535)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 -131071 -131071 -131071 -131071)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535) + (v128.const i16x8 32767 32767 32767 32767 32767 32767 32767 32767)) + (v128.const i32x4 2147385345 2147385345 2147385345 2147385345)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535) + (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768)) + (v128.const i32x4 2147450880 2147450880 2147450880 2147450880)) +(assert_return (invoke "i32x4.extmul_low_i16x8_u" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535) + (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535)) + (v128.const i32x4 -131071 -131071 -131071 -131071)) + +;; i32x4.extmul_high_i16x8_u +(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 0 0 0 0 0 0 0 0) + (v128.const i16x8 0 0 0 0 0 0 0 0)) + (v128.const i32x4 0 0 0 0)) +(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 0 0 0 0 0 0 0 0) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + (v128.const i32x4 0 0 0 0)) +(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 1 1 1 1 1 1 1 1) + (v128.const i16x8 1 1 1 1 1 1 1 1)) + (v128.const i32x4 1 1 1 1)) +(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 0 0 0 0 0 0 0 0) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 0 0 0 0)) +(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 1 1 1 1 1 1 1 1) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 65535 65535 65535 65535)) +(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1) + (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)) + (v128.const i32x4 -131071 -131071 -131071 -131071)) +(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 16383 16383 16383 16383 16383 16383 16383 16383) + (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384)) + (v128.const i32x4 268419072 268419072 268419072 268419072)) +(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384) + (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384)) + (v128.const i32x4 268435456 268435456 268435456 268435456)) +(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 -16383 -16383 -16383 -16383 -16383 -16383 -16383 -16383) + (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384)) + (v128.const i32x4 -1878999040 -1878999040 -1878999040 -1878999040)) +(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384) + (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384)) + (v128.const i32x4 -1879048192 -1879048192 -1879048192 -1879048192)) +(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 -16385 -16385 -16385 -16385 -16385 -16385 -16385 -16385) + (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384)) + (v128.const i32x4 -1879097344 
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 0 0 0 0 0 0 0 0)
+ (v128.const i16x8 0 0 0 0 0 0 0 0))
+ (v128.const i32x4 0 0 0 0))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 0 0 0 0 0 0 0 0)
+ (v128.const i16x8 1 1 1 1 1 1 1 1))
+ (v128.const i32x4 0 0 0 0))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 1 1 1 1 1 1 1 1)
+ (v128.const i16x8 1 1 1 1 1 1 1 1))
+ (v128.const i32x4 1 1 1 1))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 0 0 0 0 0 0 0 0)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 0 0 0 0))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 1 1 1 1 1 1 1 1)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 65535 65535 65535 65535))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 -131071 -131071 -131071 -131071))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 16383 16383 16383 16383 16383 16383 16383 16383)
+ (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384))
+ (v128.const i32x4 268419072 268419072 268419072 268419072))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384)
+ (v128.const i16x8 16384 16384 16384 16384 16384 16384 16384 16384))
+ (v128.const i32x4 268435456 268435456 268435456 268435456))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 -16383 -16383 -16383 -16383 -16383 -16383 -16383 -16383)
+ (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384))
+ (v128.const i32x4 -1878999040 -1878999040 -1878999040 -1878999040))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384)
+ (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384))
+ (v128.const i32x4 -1879048192 -1879048192 -1879048192 -1879048192))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 -16385 -16385 -16385 -16385 -16385 -16385 -16385 -16385)
+ (v128.const i16x8 -16384 -16384 -16384 -16384 -16384 -16384 -16384 -16384))
+ (v128.const i32x4 -1879097344 -1879097344 -1879097344 -1879097344))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 32765 32765 32765 32765 32765 32765 32765 32765)
+ (v128.const i16x8 1 1 1 1 1 1 1 1))
+ (v128.const i32x4 32765 32765 32765 32765))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 32766 32766 32766 32766 32766 32766 32766 32766)
+ (v128.const i16x8 1 1 1 1 1 1 1 1))
+ (v128.const i32x4 32766 32766 32766 32766))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 32768 32768 32768 32768 32768 32768 32768 32768)
+ (v128.const i16x8 1 1 1 1 1 1 1 1))
+ (v128.const i32x4 32768 32768 32768 32768))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 -32766 -32766 -32766 -32766 -32766 -32766 -32766 -32766)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 -2147385346 -2147385346 -2147385346 -2147385346))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 -32767 -32767 -32767 -32767 -32767 -32767 -32767 -32767)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 -2147450881 -2147450881 -2147450881 -2147450881))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 2147450880 2147450880 2147450880 2147450880))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 32767 32767 32767 32767 32767 32767 32767 32767)
+ (v128.const i16x8 32767 32767 32767 32767 32767 32767 32767 32767))
+ (v128.const i32x4 1073676289 1073676289 1073676289 1073676289))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768)
+ (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768))
+ (v128.const i32x4 1073741824 1073741824 1073741824 1073741824))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768)
+ (v128.const i16x8 -32767 -32767 -32767 -32767 -32767 -32767 -32767 -32767))
+ (v128.const i32x4 1073774592 1073774592 1073774592 1073774592))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535)
+ (v128.const i16x8 0 0 0 0 0 0 0 0))
+ (v128.const i32x4 0 0 0 0))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535)
+ (v128.const i16x8 1 1 1 1 1 1 1 1))
+ (v128.const i32x4 65535 65535 65535 65535))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535)
+ (v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
+ (v128.const i32x4 -131071 -131071 -131071 -131071))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535)
+ (v128.const i16x8 32767 32767 32767 32767 32767 32767 32767 32767))
+ (v128.const i32x4 2147385345 2147385345 2147385345 2147385345))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535)
+ (v128.const i16x8 -32768 -32768 -32768 -32768 -32768 -32768 -32768 -32768))
+ (v128.const i32x4 2147450880 2147450880 2147450880 2147450880))
+(assert_return (invoke "i32x4.extmul_high_i16x8_u" (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535)
+ (v128.const i16x8 65535 65535 65535 65535 65535 65535 65535 65535))
+ (v128.const i32x4 -131071 -131071 -131071 -131071))
+
+;; type check
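+;; NOTE (editorial): extmul is defined only on two v128 operands; feeding it
+;; i32 and f32 values must be rejected by validation.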
+(assert_invalid (module (func (result v128) (i32x4.extmul_low_i16x8_s (i32.const 0) (f32.const 0.0)))) "type mismatch")
+(assert_invalid (module (func (result v128) (i32x4.extmul_high_i16x8_s (i32.const 0) (f32.const 0.0)))) "type mismatch")
+(assert_invalid (module (func (result v128) (i32x4.extmul_low_i16x8_u (i32.const 0) (f32.const 0.0)))) "type mismatch")
+(assert_invalid (module (func (result v128) (i32x4.extmul_high_i16x8_u (i32.const 0) (f32.const 0.0)))) "type mismatch")
+
+;; Test operation with empty argument
+
+(assert_invalid
+  (module
+    (func $i32x4.extmul_low_i16x8_s-1st-arg-empty (result v128)
+      (i32x4.extmul_low_i16x8_s (v128.const i32x4 0 0 0 0))
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i32x4.extmul_low_i16x8_s-arg-empty (result v128)
+      (i32x4.extmul_low_i16x8_s)
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i32x4.extmul_high_i16x8_s-1st-arg-empty (result v128)
+      (i32x4.extmul_high_i16x8_s (v128.const i32x4 0 0 0 0))
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i32x4.extmul_high_i16x8_s-arg-empty (result v128)
+      (i32x4.extmul_high_i16x8_s)
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i32x4.extmul_low_i16x8_u-1st-arg-empty (result v128)
+      (i32x4.extmul_low_i16x8_u (v128.const i32x4 0 0 0 0))
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i32x4.extmul_low_i16x8_u-arg-empty (result v128)
+      (i32x4.extmul_low_i16x8_u)
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i32x4.extmul_high_i16x8_u-1st-arg-empty (result v128)
+      (i32x4.extmul_high_i16x8_u (v128.const i32x4 0 0 0 0))
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i32x4.extmul_high_i16x8_u-arg-empty (result v128)
+      (i32x4.extmul_high_i16x8_u)
+    )
+  )
+  "type mismatch"
+)
+
diff --git a/test/core/simd/simd_i64x2_extmul_i32x4.wast b/test/core/simd/simd_i64x2_extmul_i32x4.wast
new file mode 100644
index 000000000..9259279c9
--- /dev/null
+++ b/test/core/simd/simd_i64x2_extmul_i32x4.wast
@@ -0,0 +1,404 @@
+;; Tests for i64x2 extended multiplication operations on major boundary values and all special values.
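+;; NOTE (editorial): i64x2.extmul_low_i32x4_* widens lanes 0..1 of each i32x4
+;; operand to 64 bits (signed or unsigned) and multiplies; the _high forms use
+;; lanes 2..3. A 32x32-bit product needs at most 64 bits, so the result never
+;; wraps.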
+
+
+(module
+  (func (export "i64x2.extmul_low_i32x4_s") (param v128 v128) (result v128) (i64x2.extmul_low_i32x4_s (local.get 0) (local.get 1)))
+  (func (export "i64x2.extmul_high_i32x4_s") (param v128 v128) (result v128) (i64x2.extmul_high_i32x4_s (local.get 0) (local.get 1)))
+  (func (export "i64x2.extmul_low_i32x4_u") (param v128 v128) (result v128) (i64x2.extmul_low_i32x4_u (local.get 0) (local.get 1)))
+  (func (export "i64x2.extmul_high_i32x4_u") (param v128 v128) (result v128) (i64x2.extmul_high_i32x4_u (local.get 0) (local.get 1)))
+)
+
+
+;; i64x2.extmul_low_i32x4_s
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 0 0 0 0)
+ (v128.const i32x4 0 0 0 0))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 0 0 0 0)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 1 1 1 1)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 1 1))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 0 0 0 0)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 1 1 1 1)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 -1 -1))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 -1 -1 -1 -1)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 1 1))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 1073741823 1073741823 1073741823 1073741823)
+ (v128.const i32x4 1073741824 1073741824 1073741824 1073741824))
+ (v128.const i64x2 1152921503533105152 1152921503533105152))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 1073741824 1073741824 1073741824 1073741824)
+ (v128.const i32x4 1073741824 1073741824 1073741824 1073741824))
+ (v128.const i64x2 1152921504606846976 1152921504606846976))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 -1073741823 -1073741823 -1073741823 -1073741823)
+ (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824))
+ (v128.const i64x2 1152921503533105152 1152921503533105152))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824)
+ (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824))
+ (v128.const i64x2 1152921504606846976 1152921504606846976))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 -1073741825 -1073741825 -1073741825 -1073741825)
+ (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824))
+ (v128.const i64x2 1152921505680588800 1152921505680588800))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 2147483645 2147483645 2147483645 2147483645)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 2147483645 2147483645))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 2147483646 2147483646 2147483646 2147483646)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 2147483646 2147483646))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 2147483648 2147483648 2147483648 2147483648)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 -2147483648 -2147483648))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 -2147483646 -2147483646 -2147483646 -2147483646)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 2147483646 2147483646))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 -2147483647 -2147483647 -2147483647 -2147483647)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 2147483647 2147483647))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 2147483648 2147483648))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 2147483647 2147483647 2147483647 2147483647)
+ (v128.const i32x4 2147483647 2147483647 2147483647 2147483647))
+ (v128.const i64x2 4611686014132420609 4611686014132420609))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648)
+ (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648))
+ (v128.const i64x2 4611686018427387904 4611686018427387904))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648)
+ (v128.const i32x4 -2147483647 -2147483647 -2147483647 -2147483647))
+ (v128.const i64x2 4611686016279904256 4611686016279904256))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 0 0 0 0))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 -1 -1))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 1 1))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 2147483647 2147483647 2147483647 2147483647))
+ (v128.const i64x2 -2147483647 -2147483647))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648))
+ (v128.const i64x2 2147483648 2147483648))
+(assert_return (invoke "i64x2.extmul_low_i32x4_s" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 4294967295 4294967295 4294967295 4294967295))
+ (v128.const i64x2 1 1))
+
+;; i64x2.extmul_high_i32x4_s
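+;; NOTE (editorial): same vectors as the low group above; since every lane is
+;; splatted, extmul_high (lanes 2..3) must yield identical i64x2 results.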
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 0 0 0 0)
+ (v128.const i32x4 0 0 0 0))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 0 0 0 0)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 1 1 1 1)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 1 1))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 0 0 0 0)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 1 1 1 1)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 -1 -1))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 -1 -1 -1 -1)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 1 1))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 1073741823 1073741823 1073741823 1073741823)
+ (v128.const i32x4 1073741824 1073741824 1073741824 1073741824))
+ (v128.const i64x2 1152921503533105152 1152921503533105152))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 1073741824 1073741824 1073741824 1073741824)
+ (v128.const i32x4 1073741824 1073741824 1073741824 1073741824))
+ (v128.const i64x2 1152921504606846976 1152921504606846976))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 -1073741823 -1073741823 -1073741823 -1073741823)
+ (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824))
+ (v128.const i64x2 1152921503533105152 1152921503533105152))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824)
+ (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824))
+ (v128.const i64x2 1152921504606846976 1152921504606846976))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 -1073741825 -1073741825 -1073741825 -1073741825)
+ (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824))
+ (v128.const i64x2 1152921505680588800 1152921505680588800))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 2147483645 2147483645 2147483645 2147483645)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 2147483645 2147483645))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 2147483646 2147483646 2147483646 2147483646)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 2147483646 2147483646))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 2147483648 2147483648 2147483648 2147483648)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 -2147483648 -2147483648))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 -2147483646 -2147483646 -2147483646 -2147483646)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 2147483646 2147483646))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 -2147483647 -2147483647 -2147483647 -2147483647)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 2147483647 2147483647))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 2147483648 2147483648))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 2147483647 2147483647 2147483647 2147483647)
+ (v128.const i32x4 2147483647 2147483647 2147483647 2147483647))
+ (v128.const i64x2 4611686014132420609 4611686014132420609))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648)
+ (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648))
+ (v128.const i64x2 4611686018427387904 4611686018427387904))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648)
+ (v128.const i32x4 -2147483647 -2147483647 -2147483647 -2147483647))
+ (v128.const i64x2 4611686016279904256 4611686016279904256))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 0 0 0 0))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 -1 -1))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 1 1))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 2147483647 2147483647 2147483647 2147483647))
+ (v128.const i64x2 -2147483647 -2147483647))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648))
+ (v128.const i64x2 2147483648 2147483648))
+(assert_return (invoke "i64x2.extmul_high_i32x4_s" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 4294967295 4294967295 4294967295 4294967295))
+ (v128.const i64x2 1 1))
+
+;; i64x2.extmul_low_i32x4_u
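+;; NOTE (editorial): in the _u groups a literal -1 lane is 0xFFFFFFFF
+;; = 4294967295, so -1 * -1 is 4294967295^2 = 18446744065119617025
+;; = 0xFFFFFFFE00000001, printed below as the signed i64 -8589934591.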
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 0 0 0 0)
+ (v128.const i32x4 0 0 0 0))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 0 0 0 0)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 1 1 1 1)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 1 1))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 0 0 0 0)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 1 1 1 1)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 4294967295 4294967295))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 -1 -1 -1 -1)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 -8589934591 -8589934591))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 1073741823 1073741823 1073741823 1073741823)
+ (v128.const i32x4 1073741824 1073741824 1073741824 1073741824))
+ (v128.const i64x2 1152921503533105152 1152921503533105152))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 1073741824 1073741824 1073741824 1073741824)
+ (v128.const i32x4 1073741824 1073741824 1073741824 1073741824))
+ (v128.const i64x2 1152921504606846976 1152921504606846976))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 -1073741823 -1073741823 -1073741823 -1073741823)
+ (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824))
+ (v128.const i64x2 -8070450529026703360 -8070450529026703360))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824)
+ (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824))
+ (v128.const i64x2 -8070450532247928832 -8070450532247928832))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 -1073741825 -1073741825 -1073741825 -1073741825)
+ (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824))
+ (v128.const i64x2 -8070450535469154304 -8070450535469154304))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 2147483645 2147483645 2147483645 2147483645)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 2147483645 2147483645))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 2147483646 2147483646 2147483646 2147483646)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 2147483646 2147483646))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 2147483648 2147483648 2147483648 2147483648)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 2147483648 2147483648))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 -2147483646 -2147483646 -2147483646 -2147483646)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 -9223372030412324866 -9223372030412324866))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 -2147483647 -2147483647 -2147483647 -2147483647)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 -9223372034707292161 -9223372034707292161))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 9223372034707292160 9223372034707292160))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 2147483647 2147483647 2147483647 2147483647)
+ (v128.const i32x4 2147483647 2147483647 2147483647 2147483647))
+ (v128.const i64x2 4611686014132420609 4611686014132420609))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648)
+ (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648))
+ (v128.const i64x2 4611686018427387904 4611686018427387904))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648)
+ (v128.const i32x4 -2147483647 -2147483647 -2147483647 -2147483647))
+ (v128.const i64x2 4611686020574871552 4611686020574871552))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 0 0 0 0))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 4294967295 4294967295))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 -8589934591 -8589934591))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 2147483647 2147483647 2147483647 2147483647))
+ (v128.const i64x2 9223372030412324865 9223372030412324865))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648))
+ (v128.const i64x2 9223372034707292160 9223372034707292160))
+(assert_return (invoke "i64x2.extmul_low_i32x4_u" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 4294967295 4294967295 4294967295 4294967295))
+ (v128.const i64x2 -8589934591 -8589934591))
+
+;; i64x2.extmul_high_i32x4_u
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 0 0 0 0)
+ (v128.const i32x4 0 0 0 0))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 0 0 0 0)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 1 1 1 1)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 1 1))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 0 0 0 0)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 1 1 1 1)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 4294967295 4294967295))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 -1 -1 -1 -1)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 -8589934591 -8589934591))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 1073741823 1073741823 1073741823 1073741823)
+ (v128.const i32x4 1073741824 1073741824 1073741824 1073741824))
+ (v128.const i64x2 1152921503533105152 1152921503533105152))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 1073741824 1073741824 1073741824 1073741824)
+ (v128.const i32x4 1073741824 1073741824 1073741824 1073741824))
+ (v128.const i64x2 1152921504606846976 1152921504606846976))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 -1073741823 -1073741823 -1073741823 -1073741823)
+ (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824))
+ (v128.const i64x2 -8070450529026703360 -8070450529026703360))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824)
+ (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824))
+ (v128.const i64x2 -8070450532247928832 -8070450532247928832))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 -1073741825 -1073741825 -1073741825 -1073741825)
+ (v128.const i32x4 -1073741824 -1073741824 -1073741824 -1073741824))
+ (v128.const i64x2 -8070450535469154304 -8070450535469154304))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 2147483645 2147483645 2147483645 2147483645)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 2147483645 2147483645))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 2147483646 2147483646 2147483646 2147483646)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 2147483646 2147483646))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 2147483648 2147483648 2147483648 2147483648)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 2147483648 2147483648))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 -2147483646 -2147483646 -2147483646 -2147483646)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 -9223372030412324866 -9223372030412324866))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 -2147483647 -2147483647 -2147483647 -2147483647)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 -9223372034707292161 -9223372034707292161))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 9223372034707292160 9223372034707292160))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 2147483647 2147483647 2147483647 2147483647)
+ (v128.const i32x4 2147483647 2147483647 2147483647 2147483647))
+ (v128.const i64x2 4611686014132420609 4611686014132420609))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648)
+ (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648))
+ (v128.const i64x2 4611686018427387904 4611686018427387904))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648)
+ (v128.const i32x4 -2147483647 -2147483647 -2147483647 -2147483647))
+ (v128.const i64x2 4611686020574871552 4611686020574871552))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 0 0 0 0))
+ (v128.const i64x2 0 0))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 1 1 1 1))
+ (v128.const i64x2 4294967295 4294967295))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 -1 -1 -1 -1))
+ (v128.const i64x2 -8589934591 -8589934591))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 2147483647 2147483647 2147483647 2147483647))
+ (v128.const i64x2 9223372030412324865 9223372030412324865))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 -2147483648 -2147483648 -2147483648 -2147483648))
+ (v128.const i64x2 9223372034707292160 9223372034707292160))
+(assert_return (invoke "i64x2.extmul_high_i32x4_u" (v128.const i32x4 4294967295 4294967295 4294967295 4294967295)
+ (v128.const i32x4 4294967295 4294967295 4294967295 4294967295))
+ (v128.const i64x2 -8589934591 -8589934591))
+
+;; type check
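+;; NOTE (editorial): as in simd_i32x4_extmul_i16x8.wast, the remaining asserts
+;; check that validation rejects non-v128 and missing operands with "type mismatch".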
+(assert_invalid (module (func (result v128) (i64x2.extmul_low_i32x4_s (i32.const 0) (f32.const 0.0)))) "type mismatch")
+(assert_invalid (module (func (result v128) (i64x2.extmul_high_i32x4_s (i32.const 0) (f32.const 0.0)))) "type mismatch")
+(assert_invalid (module (func (result v128) (i64x2.extmul_low_i32x4_u (i32.const 0) (f32.const 0.0)))) "type mismatch")
+(assert_invalid (module (func (result v128) (i64x2.extmul_high_i32x4_u (i32.const 0) (f32.const 0.0)))) "type mismatch")
+
+;; Test operation with empty argument
+
+(assert_invalid
+  (module
+    (func $i64x2.extmul_low_i32x4_s-1st-arg-empty (result v128)
+      (i64x2.extmul_low_i32x4_s (v128.const i64x2 0 0))
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i64x2.extmul_low_i32x4_s-arg-empty (result v128)
+      (i64x2.extmul_low_i32x4_s)
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i64x2.extmul_high_i32x4_s-1st-arg-empty (result v128)
+      (i64x2.extmul_high_i32x4_s (v128.const i64x2 0 0))
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i64x2.extmul_high_i32x4_s-arg-empty (result v128)
+      (i64x2.extmul_high_i32x4_s)
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i64x2.extmul_low_i32x4_u-1st-arg-empty (result v128)
+      (i64x2.extmul_low_i32x4_u (v128.const i64x2 0 0))
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i64x2.extmul_low_i32x4_u-arg-empty (result v128)
+      (i64x2.extmul_low_i32x4_u)
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i64x2.extmul_high_i32x4_u-1st-arg-empty (result v128)
+      (i64x2.extmul_high_i32x4_u (v128.const i64x2 0 0))
+    )
+  )
+  "type mismatch"
+)
+(assert_invalid
+  (module
+    (func $i64x2.extmul_high_i32x4_u-arg-empty (result v128)
+      (i64x2.extmul_high_i32x4_u)
+    )
+  )
+  "type mismatch"
+)
+