From b2e0c765aaad4a2bc29d6f8dd1b8a1233b40e5f9 Mon Sep 17 00:00:00 2001
From: Ng Zhi An
Date: Thu, 11 Feb 2021 16:04:53 -0800
Subject: [PATCH] [interpreter] f64x2.promote_low_f32x4 and f32x4.demote_f64x2_zero

These two instructions were merged as part of #383. The test cases were
added manually to simd_conversions.wast, using constants from
test/core/conversions.wast.
---
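
Note: the sketch below is not part of the change itself (anything between
the "---" above and the diff is ignored by git am). It is a minimal,
standalone OCaml illustration of the lane semantics that the simd.ml hunk
further down implements. Here `demote` and `promote` are stand-ins for the
interpreter's F32_convert.demote_f64 and F64_convert.promote_f32: OCaml
floats are 64-bit, so promotion is the identity, and demotion is modeled as
a round trip through a single-precision bit pattern via the stdlib's
Int32.bits_of_float / Int32.float_of_bits.

    (* f64 -> f32 rounding, widened back to f64 for comparison *)
    let demote x = Int32.float_of_bits (Int32.bits_of_float x)
    let promote x = x

    (* f32x4.demote_f64x2_zero: demote both f64 lanes, zero the two high lanes *)
    let demote_f64x2_zero (a, b) = [demote a; demote b; 0.0; 0.0]

    (* f64x2.promote_low_f32x4: promote only the two low f32 lanes *)
    let promote_low_f32x4 = function
      | a :: b :: _ -> (promote a, promote b)
      | _ -> invalid_arg "expected four lanes"

    let () =
      assert (demote_f64x2_zero (1.0, -1.0) = [1.0; -1.0; 0.0; 0.0]);
      assert (promote_low_f32x4 [1.0; -1.0; 0.5; 0.25] = (1.0, -1.0))

This mirrors the new Simd.F32x4_convert.demote_f64x2_zero and
Simd.F64x2_convert.promote_low_f32x4 definitions in the diff; the
zero-filled high lanes are what the new wast assertions expect.
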
 interpreter/binary/decode.ml         |   2 +
 interpreter/binary/encode.ml         |   2 +
 interpreter/exec/eval_simd.ml        |   2 +
 interpreter/exec/simd.ml             |  12 +++
 interpreter/syntax/ast.ml            |   1 +
 interpreter/syntax/operators.ml      |   2 +
 interpreter/text/arrange.ml          |   2 +
 interpreter/text/lexer.mll           |   4 +
 test/core/simd/simd_conversions.wast | 152 +++++++++++++++++++++++++++
 9 files changed, 179 insertions(+)

diff --git a/interpreter/binary/decode.ml b/interpreter/binary/decode.ml
index 49673db25..8839c1d06 100644
--- a/interpreter/binary/decode.ml
+++ b/interpreter/binary/decode.ml
@@ -306,6 +306,7 @@ let simd_prefix s =
   | 0x52l -> v128_bitselect
   | 0x55l -> i32x4_trunc_sat_f64x2_s_zero
   | 0x56l -> i32x4_trunc_sat_f64x2_u_zero
+  | 0x57l -> f32x4_demote_f64x2_zero
   | 0x58l ->
     let a, o = memop s in
     let lane = u8 s in
@@ -348,6 +349,7 @@
   | 0x6dl -> i8x16_shr_u
   | 0x65l -> i8x16_narrow_i16x8_s
   | 0x66l -> i8x16_narrow_i16x8_u
+  | 0x69l -> f64x2_promote_low_f32x4
   | 0x6el -> i8x16_add
   | 0x6fl -> i8x16_add_sat_s
   | 0x70l -> i8x16_add_sat_u
diff --git a/interpreter/binary/encode.ml b/interpreter/binary/encode.ml
index a8e1cbae4..f84c78af9 100644
--- a/interpreter/binary/encode.ml
+++ b/interpreter/binary/encode.ml
@@ -386,6 +386,8 @@ let encode m =
     | Unary (V128 V128Op.(I32x4 TruncSatF64x2UZero)) -> simd_op 0x56l
     | Unary (V128 V128Op.(F32x4 ConvertI32x4S)) -> simd_op 0xfal
     | Unary (V128 V128Op.(F32x4 ConvertI32x4U)) -> simd_op 0xfbl
+    | Unary (V128 V128Op.(F32x4 DemoteF64x2Zero)) -> simd_op 0x57l
+    | Unary (V128 V128Op.(F64x2 PromoteLowF32x4)) -> simd_op 0x69l
     | Unary (V128 _) -> failwith "unimplemented V128 Unary op"
 
     | Binary (I32 I32Op.Add) -> op 0x6a
diff --git a/interpreter/exec/eval_simd.ml b/interpreter/exec/eval_simd.ml
index a7a3be2dc..4b1a555a6 100644
--- a/interpreter/exec/eval_simd.ml
+++ b/interpreter/exec/eval_simd.ml
@@ -50,6 +50,7 @@ module SimdOp (SXX : Simd.S) (Value : ValueType with type t = SXX.t) = struct
   | F32x4 Nearest -> to_value (SXX.F32x4.nearest (of_value 1 v))
   | F32x4 ConvertI32x4S -> to_value (SXX.F32x4_convert.convert_i32x4_s (of_value 1 v))
   | F32x4 ConvertI32x4U -> to_value (SXX.F32x4_convert.convert_i32x4_u (of_value 1 v))
+  | F32x4 DemoteF64x2Zero -> to_value (SXX.F32x4_convert.demote_f64x2_zero (of_value 1 v))
   | F64x2 Abs -> to_value (SXX.F64x2.abs (of_value 1 v))
   | F64x2 Neg -> to_value (SXX.F64x2.neg (of_value 1 v))
   | F64x2 Sqrt -> to_value (SXX.F64x2.sqrt (of_value 1 v))
@@ -57,6 +58,7 @@ module SimdOp (SXX : Simd.S) (Value : ValueType with type t = SXX.t) = struct
   | F64x2 Floor -> to_value (SXX.F64x2.floor (of_value 1 v))
   | F64x2 Trunc -> to_value (SXX.F64x2.trunc (of_value 1 v))
   | F64x2 Nearest -> to_value (SXX.F64x2.nearest (of_value 1 v))
+  | F64x2 PromoteLowF32x4 -> to_value (SXX.F64x2_convert.promote_low_f32x4 (of_value 1 v))
   | V128 Not -> to_value (SXX.V128.lognot (of_value 1 v))
 
   | _ -> assert false
diff --git a/interpreter/exec/simd.ml b/interpreter/exec/simd.ml
index 36299e13d..3fec3045d 100644
--- a/interpreter/exec/simd.ml
+++ b/interpreter/exec/simd.ml
@@ -212,6 +212,11 @@ sig
   module F32x4_convert : sig
     val convert_i32x4_s : t -> t
     val convert_i32x4_u : t -> t
+    val demote_f64x2_zero : t -> t
+  end
+
+  module F64x2_convert : sig
+    val promote_low_f32x4 : t -> t
   end
 end
 
@@ -499,5 +504,12 @@ struct
     let convert f v = Rep.of_f32x4 (List.map f (Rep.to_i32x4 v))
     let convert_i32x4_s = convert F32_convert.convert_i32_s
     let convert_i32x4_u = convert F32_convert.convert_i32_u
+    let demote_f64x2_zero v =
+      Rep.(of_f32x4 F32.(List.map F32_convert.demote_f64 (to_f64x2 v) @ [zero; zero]))
+  end
+
+  module F64x2_convert = struct
+    let promote_low_f32x4 v =
+      Rep.(of_f64x2 (List.map F64_convert.promote_f32 (Lib.List.take 2 (to_f32x4 v))))
   end
 end
diff --git a/interpreter/syntax/ast.ml b/interpreter/syntax/ast.ml
index 92fe56ebe..934fca825 100644
--- a/interpreter/syntax/ast.ml
+++ b/interpreter/syntax/ast.ml
@@ -60,6 +60,7 @@ struct
   type funop = Abs | Neg | Sqrt
              | Ceil | Floor | Trunc | Nearest
              | ConvertI32x4S | ConvertI32x4U
+             | DemoteF64x2Zero | PromoteLowF32x4
   type fbinop = Add | Sub | Mul | Div | Min | Max | Pmin | Pmax
              | Eq | Ne | Lt | Le | Gt | Ge
   type vunop = Not
diff --git a/interpreter/syntax/operators.ml b/interpreter/syntax/operators.ml
index ea1395268..828186a58 100644
--- a/interpreter/syntax/operators.ml
+++ b/interpreter/syntax/operators.ml
@@ -451,6 +451,7 @@ let f32x4_convert_i32x4_s = Unary (V128 V128Op.(F32x4 ConvertI32x4S))
 let f32x4_convert_i32x4_u = Unary (V128 V128Op.(F32x4 ConvertI32x4U))
 let f32x4_pmin = Binary (V128 V128Op.(F32x4 Pmin))
 let f32x4_pmax = Binary (V128 V128Op.(F32x4 Pmax))
+let f32x4_demote_f64x2_zero = Unary (V128 V128Op.(F32x4 DemoteF64x2Zero))
 
 let f64x2_splat = Convert (V128 V128Op.(F64x2 Splat))
 let f64x2_extract_lane imm = SimdExtract (V128Op.F64x2 (ZX, imm))
@@ -476,3 +477,4 @@ let f64x2_max = Binary (V128 V128Op.(F64x2 Max))
 let f64x2_abs = Unary (V128 V128Op.(F64x2 Abs))
 let f64x2_pmin = Binary (V128 V128Op.(F64x2 Pmin))
 let f64x2_pmax = Binary (V128 V128Op.(F64x2 Pmax))
+let f64x2_promote_low_f32x4 = Unary (V128 V128Op.(F64x2 PromoteLowF32x4))
diff --git a/interpreter/text/arrange.ml b/interpreter/text/arrange.ml
index 4e9dd9e1d..8e39925b9 100644
--- a/interpreter/text/arrange.ml
+++ b/interpreter/text/arrange.ml
@@ -230,6 +230,7 @@ struct
     | F32x4 Floor -> "f32x4.floor"
     | F32x4 Trunc -> "f32x4.trunc"
     | F32x4 Nearest -> "f32x4.nearest"
+    | F32x4 DemoteF64x2Zero -> "f32x4.demote_f64x2_zero"
     | F64x2 Ceil -> "f64x2.ceil"
     | F64x2 Floor -> "f64x2.floor"
     | F64x2 Trunc -> "f64x2.trunc"
@@ -242,6 +243,7 @@ struct
     | F64x2 Abs -> "f64x2.abs"
     | F64x2 Neg -> "f64x2.neg"
    | F64x2 Sqrt -> "f64x2.sqrt"
+    | F64x2 PromoteLowF32x4 -> "f64x2.promote_low_f32x4"
     | V128 Not -> "v128.not"
 
     | _ -> failwith "Unimplemented v128 unop"
diff --git a/interpreter/text/lexer.mll b/interpreter/text/lexer.mll
index 2632ff686..52670cdcd 100644
--- a/interpreter/text/lexer.mll
+++ b/interpreter/text/lexer.mll
@@ -554,6 +554,10 @@ rule token = parse
     { UNARY (ext s i32x4_trunc_sat_f32x4_s i32x4_trunc_sat_f32x4_u) }
   | "i32x4.trunc_sat_f64x2_"(sign as s)"_zero"
     { UNARY (ext s i32x4_trunc_sat_f64x2_s_zero i32x4_trunc_sat_f64x2_u_zero) }
+  | "f64x2.promote_low_f32x4"
+    { UNARY f64x2_promote_low_f32x4 }
+  | "f32x4.demote_f64x2_zero"
+    { UNARY f32x4_demote_f64x2_zero }
   | "f32x4.convert_i32x4_"(sign as s)
     { UNARY (ext s f32x4_convert_i32x4_s f32x4_convert_i32x4_u) }
   | "i8x16.narrow_i16x8_"(sign as s)
"i16x8.narrow_i32x4_u") (param v128 v128) (result v128) (i16x8.narrow_i32x4_u (local.get 0)(local.get 1))) + + ;; Float to float promote/demote + (func (export "f64x2.promote_low_f32x4") (param v128) (result v128) + (f64x2.promote_low_f32x4 (local.get 0))) + (func (export "f32x4.demote_f64x2_zero") (param v128) (result v128) + (f32x4.demote_f64x2_zero (local.get 0))) ) +;; f64x2.promote_low_f32x4 +;; Float constants copied from test/core/conversions.wast. + +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 0.0 0.0 0.0 0.0)) + (v128.const f64x2 0.0 0.0)) +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 -0.0 -0.0 -0.0 -0.0)) + (v128.const f64x2 -0.0 -0.0)) +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 0x1p-149 0x1p-149 0x1p-149 0x1p-149)) + (v128.const f64x2 0x1p-149 0x1p-149)) +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 -0x1p-149 -0x1p-149 -0x1p-149 -0x1p-149)) + (v128.const f64x2 -0x1p-149 -0x1p-149)) +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 1.0 1.0 1.0 1.0)) + (v128.const f64x2 1.0 1.0)) +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 -1.0 -1.0 -1.0 -1.0)) + (v128.const f64x2 -1.0 -1.0)) +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 -0x1.fffffep+127 -0x1.fffffep+127 -0x1.fffffep+127 -0x1.fffffep+127)) + (v128.const f64x2 -0x1.fffffep+127 -0x1.fffffep+127)) +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 0x1.fffffep+127 0x1.fffffep+127 0x1.fffffep+127 0x1.fffffep+127)) + (v128.const f64x2 0x1.fffffep+127 0x1.fffffep+127)) +;; Generated randomly by picking a random int and reinterpret it to float. +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 0x1p-119 0x1p-119 0x1p-119 0x1p-119)) + (v128.const f64x2 0x1p-119 0x1p-119)) +;; Generated randomly by picking a random float. +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 0x1.8f867ep+125 0x1.8f867ep+125 0x1.8f867ep+125 0x1.8f867ep+125)) + (v128.const f64x2 6.6382536710104395e+37 6.6382536710104395e+37)) +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 inf inf inf inf)) + (v128.const f64x2 inf inf)) +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 -inf -inf -inf -inf)) + (v128.const f64x2 -inf -inf)) +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 nan nan nan nan)) + (v128.const f64x2 nan:canonical nan:canonical)) +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 nan:0x200000 nan:0x200000 nan:0x200000 nan:0x200000)) + (v128.const f64x2 nan:arithmetic nan:arithmetic)) +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 -nan -nan -nan -nan)) + (v128.const f64x2 nan:canonical nan:canonical)) +(assert_return (invoke "f64x2.promote_low_f32x4" (v128.const f32x4 -nan:0x200000 -nan:0x200000 -nan:0x200000 -nan:0x200000)) + (v128.const f64x2 nan:arithmetic nan:arithmetic)) + +;; f32x4.demote_f64x2_zero +;; Float constants copied from test/core/conversions.wast. 
+
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0.0 0.0))
+  (v128.const f32x4 0.0 0.0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0.0 -0.0))
+  (v128.const f32x4 -0.0 -0.0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x0.0000000000001p-1022 0x0.0000000000001p-1022))
+  (v128.const f32x4 0.0 0.0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x0.0000000000001p-1022 -0x0.0000000000001p-1022))
+  (v128.const f32x4 -0.0 -0.0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 1.0 1.0))
+  (v128.const f32x4 1.0 1.0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -1.0 -1.0))
+  (v128.const f32x4 -1.0 -1.0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.fffffe0000000p-127 0x1.fffffe0000000p-127))
+  (v128.const f32x4 0x1p-126 0x1p-126 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x1.fffffe0000000p-127 -0x1.fffffe0000000p-127))
+  (v128.const f32x4 -0x1p-126 -0x1p-126 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.fffffdfffffffp-127 0x1.fffffdfffffffp-127))
+  (v128.const f32x4 0x1.fffffcp-127 0x1.fffffcp-127 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x1.fffffdfffffffp-127 -0x1.fffffdfffffffp-127))
+  (v128.const f32x4 -0x1.fffffcp-127 -0x1.fffffcp-127 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1p-149 0x1p-149))
+  (v128.const f32x4 0x1p-149 0x1p-149 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x1p-149 -0x1p-149))
+  (v128.const f32x4 -0x1p-149 -0x1p-149 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.fffffd0000000p+127 0x1.fffffd0000000p+127))
+  (v128.const f32x4 0x1.fffffcp+127 0x1.fffffcp+127 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x1.fffffd0000000p+127 -0x1.fffffd0000000p+127))
+  (v128.const f32x4 -0x1.fffffcp+127 -0x1.fffffcp+127 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.fffffd0000001p+127 0x1.fffffd0000001p+127))
+  (v128.const f32x4 0x1.fffffep+127 0x1.fffffep+127 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x1.fffffd0000001p+127 -0x1.fffffd0000001p+127))
+  (v128.const f32x4 -0x1.fffffep+127 -0x1.fffffep+127 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.fffffep+127 0x1.fffffep+127))
+  (v128.const f32x4 0x1.fffffep+127 0x1.fffffep+127 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x1.fffffep+127 -0x1.fffffep+127))
+  (v128.const f32x4 -0x1.fffffep+127 -0x1.fffffep+127 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.fffffefffffffp+127 0x1.fffffefffffffp+127))
+  (v128.const f32x4 0x1.fffffep+127 0x1.fffffep+127 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x1.fffffefffffffp+127 -0x1.fffffefffffffp+127))
+  (v128.const f32x4 -0x1.fffffep+127 -0x1.fffffep+127 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.ffffffp+127 0x1.ffffffp+127))
+  (v128.const f32x4 inf inf 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x1.ffffffp+127 -0x1.ffffffp+127))
+  (v128.const f32x4 -inf -inf 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1p-119 0x1p-119))
+  (v128.const f32x4 0x1p-119 0x1p-119 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.8f867ep+125 0x1.8f867ep+125))
+  (v128.const f32x4 0x1.8f867ep+125 0x1.8f867ep+125 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 inf inf))
+  (v128.const f32x4 inf inf 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -inf -inf))
+  (v128.const f32x4 -inf -inf 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.0000000000001p+0 0x1.0000000000001p+0))
+  (v128.const f32x4 1.0 1.0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.fffffffffffffp-1 0x1.fffffffffffffp-1))
+  (v128.const f32x4 1.0 1.0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.0000010000000p+0 0x1.0000010000000p+0))
+  (v128.const f32x4 0x1.000000p+0 0x1.000000p+0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.0000010000001p+0 0x1.0000010000001p+0))
+  (v128.const f32x4 0x1.000002p+0 0x1.000002p+0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.000002fffffffp+0 0x1.000002fffffffp+0))
+  (v128.const f32x4 0x1.000002p+0 0x1.000002p+0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.0000030000000p+0 0x1.0000030000000p+0))
+  (v128.const f32x4 0x1.000004p+0 0x1.000004p+0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.0000050000000p+0 0x1.0000050000000p+0))
+  (v128.const f32x4 0x1.000004p+0 0x1.000004p+0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.0000010000000p+24 0x1.0000010000000p+24))
+  (v128.const f32x4 0x1.0p+24 0x1.0p+24 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.0000010000001p+24 0x1.0000010000001p+24))
+  (v128.const f32x4 0x1.000002p+24 0x1.000002p+24 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.000002fffffffp+24 0x1.000002fffffffp+24))
+  (v128.const f32x4 0x1.000002p+24 0x1.000002p+24 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.0000030000000p+24 0x1.0000030000000p+24))
+  (v128.const f32x4 0x1.000004p+24 0x1.000004p+24 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.4eae4f7024c7p+108 0x1.4eae4f7024c7p+108))
+  (v128.const f32x4 0x1.4eae5p+108 0x1.4eae5p+108 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.a12e71e358685p-113 0x1.a12e71e358685p-113))
+  (v128.const f32x4 0x1.a12e72p-113 0x1.a12e72p-113 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.cb98354d521ffp-127 0x1.cb98354d521ffp-127))
+  (v128.const f32x4 0x1.cb9834p-127 0x1.cb9834p-127 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x1.6972b30cfb562p+1 -0x1.6972b30cfb562p+1))
+  (v128.const f32x4 -0x1.6972b4p+1 -0x1.6972b4p+1 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x1.bedbe4819d4c4p+112 -0x1.bedbe4819d4c4p+112))
+  (v128.const f32x4 -0x1.bedbe4p+112 -0x1.bedbe4p+112 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 nan nan))
+  (v128.const f32x4 nan:canonical nan:canonical 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 nan:0x4000000000000 nan:0x4000000000000))
+  (v128.const f32x4 nan:arithmetic nan:arithmetic 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -nan -nan))
+  (v128.const f32x4 nan:canonical nan:canonical 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -nan:0x4000000000000 -nan:0x4000000000000))
+  (v128.const f32x4 nan:arithmetic nan:arithmetic 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1p-1022 0x1p-1022))
+  (v128.const f32x4 0.0 0.0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x1p-1022 -0x1p-1022))
+  (v128.const f32x4 -0.0 -0.0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.0p-150 0x1.0p-150))
+  (v128.const f32x4 0.0 0.0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x1.0p-150 -0x1.0p-150))
+  (v128.const f32x4 -0.0 -0.0 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 0x1.0000000000001p-150 0x1.0000000000001p-150))
+  (v128.const f32x4 0x1p-149 0x1p-149 0 0))
+(assert_return (invoke "f32x4.demote_f64x2_zero" (v128.const f64x2 -0x1.0000000000001p-150 -0x1.0000000000001p-150))
+  (v128.const f32x4 -0x1p-149 -0x1p-149 0 0))
+
 
 ;; Integer to floating point
 ;; f32x4.convert_i32x4_s
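
Aside, not part of the patch: the 0x1p-1022 and p-150 assertions just above
pin down the f64-to-f32 underflow boundary. Anything at or below half of the
smallest f32 subnormal (0x1p-150) rounds to zero under ties-to-even, and
anything just above that midpoint rounds up to 0x1p-149. A quick OCaml check
of the same rounding, reusing the Int32 round-trip stand-in for the demotion
from the note at the top of this patch:

    let demote x = Int32.float_of_bits (Int32.bits_of_float x)

    let () =
      (* far below the f32 subnormal range: rounds to 0.0 *)
      assert (demote 0x1p-1022 = 0.0);
      (* exactly the midpoint between 0.0 and 0x1p-149: ties-to-even gives 0.0 *)
      assert (demote 0x1p-150 = 0.0);
      (* just above the midpoint: rounds up to the smallest f32 subnormal *)
      assert (demote 0x1.0000000000001p-150 = 0x1p-149)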