From 5b353130996959f1f4c6d64b3eb38b63025ac94d Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Thu, 12 Dec 2019 13:56:11 -0800 Subject: [PATCH 01/12] Use type traits to ensure that the float and int types match in trunc_sat. --- lib/llvm-backend/src/code.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/llvm-backend/src/code.rs b/lib/llvm-backend/src/code.rs index e424fdee9b8..b3a8d7f85dd 100644 --- a/lib/llvm-backend/src/code.rs +++ b/lib/llvm-backend/src/code.rs @@ -13,7 +13,7 @@ use inkwell::{ module::{Linkage, Module}, passes::PassManager, targets::{CodeModel, InitializationConfig, RelocMode, Target, TargetMachine}, - types::{BasicType, BasicTypeEnum, FunctionType, PointerType, VectorType}, + types::{BasicType, BasicTypeEnum, FloatMathType, FunctionType, PointerType, VectorType}, values::{ BasicValue, BasicValueEnum, FloatValue, FunctionValue, IntValue, PhiValue, PointerValue, VectorValue, @@ -99,13 +99,12 @@ fn splat_vector<'ctx>( } // Convert floating point vector to integer and saturate when out of range. -// TODO: generalize to non-vectors using FloatMathType, IntMathType, etc. for // https://github.com/WebAssembly/nontrapping-float-to-int-conversions/blob/master/proposals/nontrapping-float-to-int-conversion/Overview.md -fn trunc_sat<'ctx>( +fn trunc_sat<'ctx, T: FloatMathType<'ctx>>( builder: &Builder<'ctx>, intrinsics: &Intrinsics<'ctx>, - fvec_ty: VectorType<'ctx>, - ivec_ty: VectorType<'ctx>, + fvec_ty: T, + ivec_ty: T::MathConvType, lower_bound: u64, // Exclusive (lowest representable value) upper_bound: u64, // Exclusive (greatest representable value) int_min_value: u64, @@ -126,6 +125,9 @@ fn trunc_sat<'ctx>( // f) Use our previous comparison results to replace certain zeros with // int_min or int_max. + let fvec_ty = fvec_ty.as_basic_type_enum().into_vector_type(); + let ivec_ty = ivec_ty.as_basic_type_enum().into_vector_type(); + let is_signed = int_min_value != 0; let ivec_element_ty = ivec_ty.get_element_type().into_int_type(); let int_min_value = splat_vector( @@ -4492,6 +4494,7 @@ impl<'ctx> FunctionCodeGenerator for LLVMFunctionCodeGenerator<'ct } Operator::I64TruncSSatF32 | Operator::I64TruncSSatF64 => { let v1 = state.pop1()?.into_float_value(); + let res = builder.build_float_to_signed_int(v1, intrinsics.i64_ty, &state.var_name()); state.push1(res); From 0cfe08fff3982b245887b4f4f6f509a7f960c7a2 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Thu, 12 Dec 2019 20:27:30 -0800 Subject: [PATCH 02/12] Correct implementation of non-trapping float to int conversions in the llvm backend. 
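For reference, the behaviour this patch has to produce (per the nontrapping-float-to-int-conversions proposal linked in the code, and exercised by the spectests added in patch 03) can be sketched in plain Rust. The helper below is illustrative only, not part of the patch; it spells out the i32.trunc_sat_f32_s case: NaN becomes 0, out-of-range values clamp to i32::MIN or i32::MAX, and everything in range truncates toward zero.

fn i32_trunc_sat_f32_s(x: f32) -> i32 {
    // NaN -> 0; +/-inf and other out-of-range inputs saturate; in-range inputs
    // truncate toward zero. (Rust's own `as` cast has had the same saturating
    // semantics since 1.45, so `x as i32` is an equivalent one-liner.)
    if x.is_nan() {
        0
    } else if x >= 2147483648.0 {
        i32::MAX
    } else if x < -2147483648.0 {
        i32::MIN
    } else {
        x as i32
    }
}

fn main() {
    assert_eq!(i32_trunc_sat_f32_s(1.5), 1);
    assert_eq!(i32_trunc_sat_f32_s(f32::NAN), 0);
    assert_eq!(i32_trunc_sat_f32_s(f32::INFINITY), i32::MAX);
    assert_eq!(i32_trunc_sat_f32_s(f32::NEG_INFINITY), i32::MIN);
}

The vector and scalar paths below implement the same decision procedure branch-free, using float compares and selects.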
--- lib/llvm-backend/src/code.rs | 241 +++++++++++++++++++++++++++++++---- 1 file changed, 217 insertions(+), 24 deletions(-) diff --git a/lib/llvm-backend/src/code.rs b/lib/llvm-backend/src/code.rs index b3a8d7f85dd..a566b5a27b2 100644 --- a/lib/llvm-backend/src/code.rs +++ b/lib/llvm-backend/src/code.rs @@ -13,7 +13,9 @@ use inkwell::{ module::{Linkage, Module}, passes::PassManager, targets::{CodeModel, InitializationConfig, RelocMode, Target, TargetMachine}, - types::{BasicType, BasicTypeEnum, FloatMathType, FunctionType, PointerType, VectorType}, + types::{ + BasicType, BasicTypeEnum, FloatMathType, FunctionType, IntType, PointerType, VectorType, + }, values::{ BasicValue, BasicValueEnum, FloatValue, FunctionValue, IntValue, PhiValue, PointerValue, VectorValue, @@ -127,9 +129,10 @@ fn trunc_sat<'ctx, T: FloatMathType<'ctx>>( let fvec_ty = fvec_ty.as_basic_type_enum().into_vector_type(); let ivec_ty = ivec_ty.as_basic_type_enum().into_vector_type(); - - let is_signed = int_min_value != 0; + let fvec_element_ty = fvec_ty.get_element_type().into_float_type(); let ivec_element_ty = ivec_ty.get_element_type().into_int_type(); + + let is_signed = int_min_value != 0; let int_min_value = splat_vector( builder, intrinsics, @@ -151,26 +154,26 @@ fn trunc_sat<'ctx, T: FloatMathType<'ctx>>( let lower_bound = if is_signed { builder.build_signed_int_to_float( ivec_element_ty.const_int(lower_bound, is_signed), - fvec_ty.get_element_type().into_float_type(), + fvec_element_ty, "", ) } else { builder.build_unsigned_int_to_float( ivec_element_ty.const_int(lower_bound, is_signed), - fvec_ty.get_element_type().into_float_type(), + fvec_element_ty, "", ) }; let upper_bound = if is_signed { builder.build_signed_int_to_float( ivec_element_ty.const_int(upper_bound, is_signed), - fvec_ty.get_element_type().into_float_type(), + fvec_element_ty, "", ) } else { builder.build_unsigned_int_to_float( ivec_element_ty.const_int(upper_bound, is_signed), - fvec_ty.get_element_type().into_float_type(), + fvec_element_ty, "", ) }; @@ -222,6 +225,93 @@ fn trunc_sat<'ctx, T: FloatMathType<'ctx>>( .into_int_value() } +// Convert floating point vector to integer and saturate when out of range. +// https://github.com/WebAssembly/nontrapping-float-to-int-conversions/blob/master/proposals/nontrapping-float-to-int-conversion/Overview.md +fn trunc_sat_scalar<'ctx>( + builder: &Builder<'ctx>, + int_ty: IntType<'ctx>, + lower_bound: u64, // Exclusive (lowest representable value) + upper_bound: u64, // Exclusive (greatest representable value) + int_min_value: u64, + int_max_value: u64, + value: FloatValue<'ctx>, + name: &str, +) -> IntValue<'ctx> { + // TODO: this is a scalarized version of the process in trunc_sat. Either + // we should merge with trunc_sat, or we should simplify this function. + + // a) Compare value with itself to identify NaN. + // b) Compare value inttofp(upper_bound) to identify values that need to + // saturate to max. + // c) Compare value with inttofp(lower_bound) to identify values that need + // to saturate to min. + // d) Use select to pick from either zero or the input vector depending on + // whether the comparison indicates that we have an unrepresentable + // value. + // e) Now that the value is safe, fpto[su]i it. + // f) Use our previous comparison results to replace certain zeros with + // int_min or int_max. 
+ + let is_signed = int_min_value != 0; + let int_min_value = int_ty.const_int(int_min_value, is_signed); + let int_max_value = int_ty.const_int(int_max_value, is_signed); + + let lower_bound = if is_signed { + builder.build_signed_int_to_float( + int_ty.const_int(lower_bound, is_signed), + value.get_type(), + "", + ) + } else { + builder.build_unsigned_int_to_float( + int_ty.const_int(lower_bound, is_signed), + value.get_type(), + "", + ) + }; + let upper_bound = if is_signed { + builder.build_signed_int_to_float( + int_ty.const_int(upper_bound, is_signed), + value.get_type(), + "", + ) + } else { + builder.build_unsigned_int_to_float( + int_ty.const_int(upper_bound, is_signed), + value.get_type(), + "", + ) + }; + + let zero = value.get_type().const_zero(); + + let nan_cmp = builder.build_float_compare(FloatPredicate::UNO, value, zero, "nan"); + let above_upper_bound_cmp = + builder.build_float_compare(FloatPredicate::OGT, value, upper_bound, "above_upper_bound"); + let below_lower_bound_cmp = + builder.build_float_compare(FloatPredicate::OLT, value, lower_bound, "below_lower_bound"); + let not_representable = builder.build_or( + builder.build_or(nan_cmp, above_upper_bound_cmp, ""), + below_lower_bound_cmp, + "not_representable_as_int", + ); + let value = builder + .build_select(not_representable, zero, value, "safe_to_convert") + .into_float_value(); + let value = if is_signed { + builder.build_float_to_signed_int(value, int_ty, "as_int") + } else { + builder.build_float_to_unsigned_int(value, int_ty, "as_int") + }; + let value = builder + .build_select(above_upper_bound_cmp, int_max_value, value, "") + .into_int_value(); + let value = builder + .build_select(below_lower_bound_cmp, int_min_value, value, name) + .into_int_value(); + builder.build_bitcast(value, int_ty, "").into_int_value() +} + fn trap_if_not_representable_as_int<'ctx>( builder: &Builder<'ctx>, intrinsics: &Intrinsics<'ctx>, @@ -4459,10 +4549,36 @@ impl<'ctx> FunctionCodeGenerator for LLVMFunctionCodeGenerator<'ct builder.build_float_to_signed_int(v1, intrinsics.i32_ty, &state.var_name()); state.push1(res); } - Operator::I32TruncSSatF32 | Operator::I32TruncSSatF64 => { - let v1 = state.pop1()?.into_float_value(); - let res = - builder.build_float_to_signed_int(v1, intrinsics.i32_ty, &state.var_name()); + Operator::I32TruncSSatF32 => { + let (v, i) = state.pop1_extra()?; + let v = apply_pending_canonicalization(builder, intrinsics, v, i); + let v = v.into_float_value(); + let res = trunc_sat_scalar( + builder, + intrinsics.i32_ty, + std::i32::MIN as u64, + 2147483520u64, // bits as f32: 0x4effffff + std::i32::MIN as u32 as u64, + std::i32::MAX as u32 as u64, + v, + &state.var_name(), + ); + state.push1(res); + } + Operator::I32TruncSSatF64 => { + let (v, i) = state.pop1_extra()?; + let v = apply_pending_canonicalization(builder, intrinsics, v, i); + let v = v.into_float_value(); + let res = trunc_sat_scalar( + builder, + intrinsics.i32_ty, + std::i32::MIN as u64, + std::i32::MAX as u64, + std::i32::MIN as u64, + std::i32::MAX as u64, + v, + &state.var_name(), + ); state.push1(res); } Operator::I64TruncSF32 => { @@ -4492,11 +4608,36 @@ impl<'ctx> FunctionCodeGenerator for LLVMFunctionCodeGenerator<'ct builder.build_float_to_signed_int(v1, intrinsics.i64_ty, &state.var_name()); state.push1(res); } - Operator::I64TruncSSatF32 | Operator::I64TruncSSatF64 => { - let v1 = state.pop1()?.into_float_value(); - - let res = - builder.build_float_to_signed_int(v1, intrinsics.i64_ty, &state.var_name()); + Operator::I64TruncSSatF32 => 
{ + let (v, i) = state.pop1_extra()?; + let v = apply_pending_canonicalization(builder, intrinsics, v, i); + let v = v.into_float_value(); + let res = trunc_sat_scalar( + builder, + intrinsics.i64_ty, + std::i64::MIN as u64, + 9223371487098961920, // bits as f32: 0x5eff_ffff + std::i64::MIN as u64, + std::i64::MAX as u64, + v, + &state.var_name(), + ); + state.push1(res); + } + Operator::I64TruncSSatF64 => { + let (v, i) = state.pop1_extra()?; + let v = apply_pending_canonicalization(builder, intrinsics, v, i); + let v = v.into_float_value(); + let res = trunc_sat_scalar( + builder, + intrinsics.i64_ty, + std::i64::MIN as u64, + 9223372036854774784, // bits as f64: 0x43df_ffff_ffff_ffff + std::i64::MIN as u64, + std::i64::MAX as u64, + v, + &state.var_name(), + ); state.push1(res); } Operator::I32TruncUF32 => { @@ -4525,10 +4666,36 @@ impl<'ctx> FunctionCodeGenerator for LLVMFunctionCodeGenerator<'ct builder.build_float_to_unsigned_int(v1, intrinsics.i32_ty, &state.var_name()); state.push1(res); } - Operator::I32TruncUSatF32 | Operator::I32TruncUSatF64 => { - let v1 = state.pop1()?.into_float_value(); - let res = - builder.build_float_to_unsigned_int(v1, intrinsics.i32_ty, &state.var_name()); + Operator::I32TruncUSatF32 => { + let (v, i) = state.pop1_extra()?; + let v = apply_pending_canonicalization(builder, intrinsics, v, i); + let v = v.into_float_value(); + let res = trunc_sat_scalar( + builder, + intrinsics.i32_ty, + std::u32::MIN as u64, + 4294967040, // bits as f32: 0x4f7fffff + std::u32::MIN as u64, + std::u32::MAX as u64, + v, + &state.var_name(), + ); + state.push1(res); + } + Operator::I32TruncUSatF64 => { + let (v, i) = state.pop1_extra()?; + let v = apply_pending_canonicalization(builder, intrinsics, v, i); + let v = v.into_float_value(); + let res = trunc_sat_scalar( + builder, + intrinsics.i32_ty, + std::u32::MIN as u64, + 4294967295, // bits as f64: 0x41efffffffffffff + std::u32::MIN as u64, + std::u32::MAX as u64, + v, + &state.var_name(), + ); state.push1(res); } Operator::I64TruncUF32 => { @@ -4557,10 +4724,36 @@ impl<'ctx> FunctionCodeGenerator for LLVMFunctionCodeGenerator<'ct builder.build_float_to_unsigned_int(v1, intrinsics.i64_ty, &state.var_name()); state.push1(res); } - Operator::I64TruncUSatF32 | Operator::I64TruncUSatF64 => { - let v1 = state.pop1()?.into_float_value(); - let res = - builder.build_float_to_unsigned_int(v1, intrinsics.i64_ty, &state.var_name()); + Operator::I64TruncUSatF32 => { + let (v, i) = state.pop1_extra()?; + let v = apply_pending_canonicalization(builder, intrinsics, v, i); + let v = v.into_float_value(); + let res = trunc_sat_scalar( + builder, + intrinsics.i64_ty, + std::u64::MIN, + 18446742974197923840, // bits as f32: 0x5f7fffff + std::u64::MIN, + std::u64::MAX, + v, + &state.var_name(), + ); + state.push1(res); + } + Operator::I64TruncUSatF64 => { + let (v, i) = state.pop1_extra()?; + let v = apply_pending_canonicalization(builder, intrinsics, v, i); + let v = v.into_float_value(); + let res = trunc_sat_scalar( + builder, + intrinsics.i64_ty, + std::u64::MIN, + 18446744073709549568u64, // bits as f64: 0x43EFFFFFFFFFFFFF + std::u64::MIN, + std::u64::MAX, + v, + &state.var_name(), + ); state.push1(res); } Operator::F32DemoteF64 => { From 6fe2f434c659478465df02e05f9f68b41fcd0215 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Thu, 12 Dec 2019 20:32:08 -0800 Subject: [PATCH 03/12] Add tests for non-trapping float to int conversions. This breaks all of conversions.wast on singlepass. LLVM and Cranelift pass. 
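Several boundary values probed by these tests (and used as saturation bounds in patch 02) are the largest f32/f64 values that still lie strictly below 2^31, 2^32, 2^63 or 2^64. A small illustrative Rust check of the bit-pattern comments, not itself part of the patch:

fn main() {
    // Largest f32 strictly below 2^31, 2^32, 2^63 and 2^64, respectively.
    assert_eq!(f32::from_bits(0x4EFF_FFFF), 2_147_483_520.0);
    assert_eq!(f32::from_bits(0x4F7F_FFFF), 4_294_967_040.0);
    assert_eq!(f32::from_bits(0x5EFF_FFFF), 9_223_371_487_098_961_920.0);
    assert_eq!(f32::from_bits(0x5F7F_FFFF), 18_446_742_974_197_923_840.0);
    // Largest f64 strictly below 2^63 and 2^64.
    assert_eq!(f64::from_bits(0x43DF_FFFF_FFFF_FFFF), 9_223_372_036_854_774_784.0);
    assert_eq!(f64::from_bits(0x43EF_FFFF_FFFF_FFFF), 18_446_744_073_709_549_568.0);
}

Anything at or above the corresponding power of two saturates to the integer type's maximum, anything truncating below the target range saturates to the minimum, and NaN becomes zero.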
--- lib/spectests/spectests/conversions.wast | 198 +++++++++++++++++++++++ lib/spectests/tests/excludes.txt | 129 ++++++++------- lib/spectests/tests/spectest.rs | 1 + 3 files changed, 263 insertions(+), 65 deletions(-) diff --git a/lib/spectests/spectests/conversions.wast b/lib/spectests/spectests/conversions.wast index 8022a1a6063..746a0ab3c86 100644 --- a/lib/spectests/spectests/conversions.wast +++ b/lib/spectests/spectests/conversions.wast @@ -15,6 +15,14 @@ (func (export "i64.trunc_f32_u") (param $x f32) (result i64) (i64.trunc_f32_u (local.get $x))) (func (export "i64.trunc_f64_s") (param $x f64) (result i64) (i64.trunc_f64_s (local.get $x))) (func (export "i64.trunc_f64_u") (param $x f64) (result i64) (i64.trunc_f64_u (local.get $x))) + (func (export "i32.trunc_sat_f32_s") (param $x f32) (result i32) (i32.trunc_sat_f32_s (local.get $x))) + (func (export "i32.trunc_sat_f32_u") (param $x f32) (result i32) (i32.trunc_sat_f32_u (local.get $x))) + (func (export "i32.trunc_sat_f64_s") (param $x f64) (result i32) (i32.trunc_sat_f64_s (local.get $x))) + (func (export "i32.trunc_sat_f64_u") (param $x f64) (result i32) (i32.trunc_sat_f64_u (local.get $x))) + (func (export "i64.trunc_sat_f32_s") (param $x f32) (result i64) (i64.trunc_sat_f32_s (local.get $x))) + (func (export "i64.trunc_sat_f32_u") (param $x f32) (result i64) (i64.trunc_sat_f32_u (local.get $x))) + (func (export "i64.trunc_sat_f64_s") (param $x f64) (result i64) (i64.trunc_sat_f64_s (local.get $x))) + (func (export "i64.trunc_sat_f64_u") (param $x f64) (result i64) (i64.trunc_sat_f64_u (local.get $x))) (func (export "f32.convert_i32_s") (param $x i32) (result f32) (f32.convert_i32_s (local.get $x))) (func (export "f32.convert_i64_s") (param $x i64) (result f32) (f32.convert_i64_s (local.get $x))) (func (export "f64.convert_i32_s") (param $x i32) (result f64) (f64.convert_i32_s (local.get $x))) @@ -246,6 +254,196 @@ (assert_trap (invoke "i64.trunc_f64_u" (f64.const -nan)) "invalid conversion to integer") (assert_trap (invoke "i64.trunc_f64_u" (f64.const -nan:0x4000000000000)) "invalid conversion to integer") +;; Saturating conversions: test all the same values as the non-saturating conversions. 
+ +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const 0.0)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const -0.0)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const 0x1p-149)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const -0x1p-149)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const 1.0)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const 0x1.19999ap+0)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const 1.5)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const -1.0)) (i32.const -1)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const -0x1.19999ap+0)) (i32.const -1)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const -1.5)) (i32.const -1)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const -1.9)) (i32.const -1)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const -2.0)) (i32.const -2)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const 2147483520.0)) (i32.const 2147483520)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const -2147483648.0)) (i32.const -2147483648)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const 2147483648.0)) (i32.const 0x7fffffff)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const -2147483904.0)) (i32.const 0x80000000)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const inf)) (i32.const 0x7fffffff)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const -inf)) (i32.const 0x80000000)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const nan)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const nan:0x200000)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const -nan)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_s" (f32.const -nan:0x200000)) (i32.const 0)) + +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const 0.0)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const -0.0)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const 0x1p-149)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const -0x1p-149)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const 1.0)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const 0x1.19999ap+0)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const 1.5)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const 1.9)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const 2.0)) (i32.const 2)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const 2147483648)) (i32.const -2147483648)) ;; 0x1.00000p+31 -> 8000 0000 +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const 4294967040.0)) (i32.const -256)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const -0x1.ccccccp-1)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const -0x1.fffffep-1)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const 4294967296.0)) (i32.const 0xffffffff)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const -1.0)) (i32.const 0x00000000)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const inf)) (i32.const 0xffffffff)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const -inf)) (i32.const 0x00000000)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const nan)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const nan:0x200000)) (i32.const 0)) +(assert_return (invoke 
"i32.trunc_sat_f32_u" (f32.const -nan)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f32_u" (f32.const -nan:0x200000)) (i32.const 0)) + +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const 0.0)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const -0.0)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const 0x0.0000000000001p-1022)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const -0x0.0000000000001p-1022)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const 1.0)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const 0x1.199999999999ap+0)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const 1.5)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const -1.0)) (i32.const -1)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const -0x1.199999999999ap+0)) (i32.const -1)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const -1.5)) (i32.const -1)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const -1.9)) (i32.const -1)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const -2.0)) (i32.const -2)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const 2147483647.0)) (i32.const 2147483647)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const -2147483648.0)) (i32.const -2147483648)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const 2147483648.0)) (i32.const 0x7fffffff)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const -2147483649.0)) (i32.const 0x80000000)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const inf)) (i32.const 0x7fffffff)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const -inf)) (i32.const 0x80000000)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const nan)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const nan:0x4000000000000)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const -nan)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_s" (f64.const -nan:0x4000000000000)) (i32.const 0)) + +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const 0.0)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const -0.0)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const 0x0.0000000000001p-1022)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const -0x0.0000000000001p-1022)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const 1.0)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const 0x1.199999999999ap+0)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const 1.5)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const 1.9)) (i32.const 1)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const 2.0)) (i32.const 2)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const 2147483648)) (i32.const -2147483648)) ;; 0x1.00000p+31 -> 8000 0000 +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const 4294967295.0)) (i32.const -1)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const -0x1.ccccccccccccdp-1)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const -0x1.fffffffffffffp-1)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const 1e8)) (i32.const 100000000)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const 4294967296.0)) (i32.const 0xffffffff)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const -1.0)) (i32.const 0x00000000)) +(assert_return (invoke 
"i32.trunc_sat_f64_u" (f64.const 1e16)) (i32.const 0xffffffff)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const 1e30)) (i32.const 0xffffffff)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const 9223372036854775808)) (i32.const 0xffffffff)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const inf)) (i32.const 0xffffffff)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const -inf)) (i32.const 0x00000000)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const nan)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const nan:0x4000000000000)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const -nan)) (i32.const 0)) +(assert_return (invoke "i32.trunc_sat_f64_u" (f64.const -nan:0x4000000000000)) (i32.const 0)) + +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const 0.0)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const -0.0)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const 0x1p-149)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const -0x1p-149)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const 1.0)) (i64.const 1)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const 0x1.19999ap+0)) (i64.const 1)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const 1.5)) (i64.const 1)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const -1.0)) (i64.const -1)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const -0x1.19999ap+0)) (i64.const -1)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const -1.5)) (i64.const -1)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const -1.9)) (i64.const -1)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const -2.0)) (i64.const -2)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const 4294967296)) (i64.const 4294967296)) ;; 0x1.00000p+32 -> 1 0000 0000 +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const -4294967296)) (i64.const -4294967296)) ;; -0x1.00000p+32 -> ffff ffff 0000 0000 +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const 9223371487098961920.0)) (i64.const 9223371487098961920)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const -9223372036854775808.0)) (i64.const -9223372036854775808)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const 9223372036854775808.0)) (i64.const 0x7fffffffffffffff)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const -9223373136366403584.0)) (i64.const 0x8000000000000000)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const inf)) (i64.const 0x7fffffffffffffff)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const -inf)) (i64.const 0x8000000000000000)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const nan)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const nan:0x200000)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const -nan)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_s" (f32.const -nan:0x200000)) (i64.const 0)) + +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const 0.0)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const -0.0)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const 0x1p-149)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const -0x1p-149)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const 1.0)) (i64.const 1)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const 0x1.19999ap+0)) (i64.const 1)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const 1.5)) 
(i64.const 1)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const 4294967296)) (i64.const 4294967296)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const 18446742974197923840.0)) (i64.const -1099511627776)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const -0x1.ccccccp-1)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const -0x1.fffffep-1)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const 18446744073709551616.0)) (i64.const 0xffffffffffffffff)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const -1.0)) (i64.const 0x0000000000000000)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const inf)) (i64.const 0xffffffffffffffff)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const -inf)) (i64.const 0x0000000000000000)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const nan)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const nan:0x200000)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const -nan)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f32_u" (f32.const -nan:0x200000)) (i64.const 0)) + +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const 0.0)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const -0.0)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const 0x0.0000000000001p-1022)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const -0x0.0000000000001p-1022)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const 1.0)) (i64.const 1)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const 0x1.199999999999ap+0)) (i64.const 1)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const 1.5)) (i64.const 1)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const -1.0)) (i64.const -1)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const -0x1.199999999999ap+0)) (i64.const -1)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const -1.5)) (i64.const -1)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const -1.9)) (i64.const -1)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const -2.0)) (i64.const -2)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const 4294967296)) (i64.const 4294967296)) ;; 0x1.00000p+32 -> 1 0000 0000 +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const -4294967296)) (i64.const -4294967296)) ;; -0x1.00000p+32 -> ffff ffff 0000 0000 +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const 9223372036854774784.0)) (i64.const 9223372036854774784)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const -9223372036854775808.0)) (i64.const -9223372036854775808)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const 9223372036854775808.0)) (i64.const 0x7fffffffffffffff)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const -9223372036854777856.0)) (i64.const 0x8000000000000000)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const inf)) (i64.const 0x7fffffffffffffff)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const -inf)) (i64.const 0x8000000000000000)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const nan)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const nan:0x4000000000000)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const -nan)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_s" (f64.const -nan:0x4000000000000)) (i64.const 0)) + +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const 0.0)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const -0.0)) 
(i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const 0x0.0000000000001p-1022)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const -0x0.0000000000001p-1022)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const 1.0)) (i64.const 1)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const 0x1.199999999999ap+0)) (i64.const 1)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const 1.5)) (i64.const 1)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const 4294967295)) (i64.const 0xffffffff)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const 4294967296)) (i64.const 0x100000000)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const 18446744073709549568.0)) (i64.const -2048)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const -0x1.ccccccccccccdp-1)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const -0x1.fffffffffffffp-1)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const 1e8)) (i64.const 100000000)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const 1e16)) (i64.const 10000000000000000)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const 9223372036854775808)) (i64.const -9223372036854775808)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const 18446744073709551616.0)) (i64.const 0xffffffffffffffff)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const -1.0)) (i64.const 0x0000000000000000)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const inf)) (i64.const 0xffffffffffffffff)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const -inf)) (i64.const 0x0000000000000000)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const nan)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const nan:0x4000000000000)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const -nan)) (i64.const 0)) +(assert_return (invoke "i64.trunc_sat_f64_u" (f64.const -nan:0x4000000000000)) (i64.const 0)) + (assert_return (invoke "f32.convert_i32_s" (i32.const 1)) (f32.const 1.0)) (assert_return (invoke "f32.convert_i32_s" (i32.const -1)) (f32.const -1.0)) (assert_return (invoke "f32.convert_i32_s" (i32.const 0)) (f32.const 0.0)) diff --git a/lib/spectests/tests/excludes.txt b/lib/spectests/tests/excludes.txt index 484c809b733..4919f9338b3 100644 --- a/lib/spectests/tests/excludes.txt +++ b/lib/spectests/tests/excludes.txt @@ -306,7 +306,6 @@ llvm:skip:unwind.wast:*:windows llvm:skip:simd.wast:352:unix # Module - caught panic Any llvm:skip:simd_binaryen.wast:*:unix # Module - caught panic Any - # Singlepass singlepass:skip:simd.wast:* # SIMD not implemented singlepass:skip:simd_binaryen.wast:* # SIMD not implemented @@ -405,73 +404,73 @@ singlepass:fail:call_indirect.wast:493 # AssertTrap - expected trap, got Runtime singlepass:fail:call_indirect.wast:494 # AssertTrap - expected trap, got Runtime:Error unknown error singlepass:fail:call_indirect.wast:500 # AssertTrap - expected trap, got Runtime:Error unknown error singlepass:fail:call_indirect.wast:501 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:75 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:76 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:77 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:78 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:79 # 
AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:80 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:81 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:82 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:97 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:98 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:99 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:100 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:101 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:102 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:103 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:104 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:120 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:121 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:122 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:123 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:124 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:125 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:126 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:127 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:143 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:144 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:145 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:146 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:147 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:148 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:149 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:150 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:83 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:84 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:85 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:86 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:87 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:88 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:89 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:90 # AssertTrap - expected trap, got Runtime:Error unknown error 
+singlepass:fail:conversions.wast:105 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:106 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:107 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:108 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:109 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:110 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:111 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:112 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:128 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:129 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:130 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:131 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:132 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:133 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:134 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:135 # AssertTrap - expected trap, got Runtime:Error unknown error singlepass:fail:conversions.wast:151 # AssertTrap - expected trap, got Runtime:Error unknown error singlepass:fail:conversions.wast:152 # AssertTrap - expected trap, got Runtime:Error unknown error singlepass:fail:conversions.wast:153 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:171 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:172 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:173 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:174 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:175 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:176 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:177 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:178 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:191 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:192 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:193 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:194 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:195 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:196 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:197 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:198 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:216 # AssertTrap - 
expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:217 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:218 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:219 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:220 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:221 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:222 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:223 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:240 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:241 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:242 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:243 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:244 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:245 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:246 # AssertTrap - expected trap, got Runtime:Error unknown error -singlepass:fail:conversions.wast:247 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:154 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:155 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:156 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:157 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:158 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:159 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:160 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:161 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:179 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:180 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:181 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:182 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:183 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:184 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:185 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:186 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:199 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:200 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:201 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:202 # AssertTrap - expected trap, got Runtime:Error unknown error 
+singlepass:fail:conversions.wast:203 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:204 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:205 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:206 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:224 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:225 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:226 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:227 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:228 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:229 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:230 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:231 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:248 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:249 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:250 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:251 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:252 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:253 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:254 # AssertTrap - expected trap, got Runtime:Error unknown error +singlepass:fail:conversions.wast:255 # AssertTrap - expected trap, got Runtime:Error unknown error singlepass:fail:elem.wast:353 # AssertTrap - expected trap, got Runtime:Error unknown error singlepass:fail:func_ptrs.wast:78 # AssertTrap - expected trap, got Runtime:Error unknown error singlepass:fail:func_ptrs.wast:79 # AssertTrap - expected trap, got Runtime:Error unknown error diff --git a/lib/spectests/tests/spectest.rs b/lib/spectests/tests/spectest.rs index 3b03b598f4b..518e545d6bc 100644 --- a/lib/spectests/tests/spectest.rs +++ b/lib/spectests/tests/spectest.rs @@ -250,6 +250,7 @@ mod tests { features.enable_simd(); features.enable_threads(); features.enable_sign_extension(); + features.enable_sat_float_to_int(); let mut parser: ScriptParser = ScriptParser::from_source_and_name_with_features(&source, filename, features) .expect(&format!("Failed to parse script {}", &filename)); From 442c40f5f167d6fc45bf66151d1a61e4a517f75f Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Fri, 13 Dec 2019 17:08:46 -0800 Subject: [PATCH 04/12] Initial implementation of trunc-sat instructions in singlepass. 27 test failures. --- lib/singlepass-backend/src/codegen_x64.rs | 469 ++++++++++++++++++++-- 1 file changed, 441 insertions(+), 28 deletions(-) diff --git a/lib/singlepass-backend/src/codegen_x64.rs b/lib/singlepass-backend/src/codegen_x64.rs index 57cffc7851a..a86d15c6c3c 100644 --- a/lib/singlepass-backend/src/codegen_x64.rs +++ b/lib/singlepass-backend/src/codegen_x64.rs @@ -2021,20 +2021,19 @@ impl X64FunctionCode { Ok(()) } - // Checks for underflow/overflow/nan before IxxTrunc{U/S}F32. + // Checks for underflow/overflow/nan.
fn emit_f32_int_conv_check( a: &mut Assembler, m: &mut Machine, reg: XMM, lower_bound: f32, upper_bound: f32, + fail_label: ::Label, + succeed_label: ::Label, ) { let lower_bound = f32::to_bits(lower_bound); let upper_bound = f32::to_bits(upper_bound); - let trap = a.get_label(); - let end = a.get_label(); - let tmp = m.acquire_temp_gpr().unwrap(); let tmp_x = m.acquire_temp_xmm().unwrap(); @@ -2044,7 +2043,7 @@ impl X64FunctionCode { a.emit_vcmpless(reg, XMMOrMemory::XMM(tmp_x), tmp_x); a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp)); a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - a.emit_jmp(Condition::NotEqual, trap); + a.emit_jmp(Condition::NotEqual, fail_label); // Overflow. a.emit_mov(Size::S32, Location::Imm32(upper_bound), Location::GPR(tmp)); @@ -2052,37 +2051,69 @@ impl X64FunctionCode { a.emit_vcmpgess(reg, XMMOrMemory::XMM(tmp_x), tmp_x); a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp)); a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - a.emit_jmp(Condition::NotEqual, trap); + a.emit_jmp(Condition::NotEqual, fail_label); // NaN. a.emit_vcmpeqss(reg, XMMOrMemory::XMM(reg), tmp_x); a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp)); a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - a.emit_jmp(Condition::Equal, trap); + a.emit_jmp(Condition::Equal, fail_label); + + a.emit_jmp(Condition::None, succeed_label); + + m.release_temp_xmm(tmp_x); + m.release_temp_gpr(tmp); + } - a.emit_jmp(Condition::None, end); + // Checks for underflow/overflow/nan before IxxTrunc{U/S}F32. + fn emit_f32_int_conv_check_trap( + a: &mut Assembler, + m: &mut Machine, + reg: XMM, + lower_bound: f32, + upper_bound: f32, + ) { + let trap = a.get_label(); + let end = a.get_label(); + + Self::emit_f32_int_conv_check(a, m, reg, lower_bound, upper_bound, trap, end); a.emit_label(trap); a.emit_ud2(); a.emit_label(end); + } - m.release_temp_xmm(tmp_x); - m.release_temp_gpr(tmp); + fn emit_f32_int_conv_check_zero( + a: &mut Assembler, + m: &mut Machine, + reg: XMM, + lower_bound: f32, + upper_bound: f32, + ) { + let trap = a.get_label(); + let end = a.get_label(); + + Self::emit_f32_int_conv_check(a, m, reg, lower_bound, upper_bound, trap, end); + a.emit_label(trap); + let gpr = m.acquire_temp_gpr().unwrap(); + a.emit_mov(Size::S32, Location::Imm32(0), Location::GPR(gpr)); + a.emit_mov(Size::S32, Location::GPR(gpr), Location::XMM(reg)); + m.release_temp_gpr(gpr); + a.emit_label(end); } - // Checks for underflow/overflow/nan before IxxTrunc{U/S}F64. + // Checks for underflow/overflow/nan. fn emit_f64_int_conv_check( a: &mut Assembler, m: &mut Machine, reg: XMM, lower_bound: f64, upper_bound: f64, + fail_label: ::Label, + succeed_label: ::Label, ) { let lower_bound = f64::to_bits(lower_bound); let upper_bound = f64::to_bits(upper_bound); - let trap = a.get_label(); - let end = a.get_label(); - let tmp = m.acquire_temp_gpr().unwrap(); let tmp_x = m.acquire_temp_xmm().unwrap(); @@ -2092,7 +2123,7 @@ impl X64FunctionCode { a.emit_vcmplesd(reg, XMMOrMemory::XMM(tmp_x), tmp_x); a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp)); a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - a.emit_jmp(Condition::NotEqual, trap); + a.emit_jmp(Condition::NotEqual, fail_label); // Overflow. 
a.emit_mov(Size::S64, Location::Imm64(upper_bound), Location::GPR(tmp)); @@ -2100,21 +2131,54 @@ impl X64FunctionCode { a.emit_vcmpgesd(reg, XMMOrMemory::XMM(tmp_x), tmp_x); a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp)); a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - a.emit_jmp(Condition::NotEqual, trap); + a.emit_jmp(Condition::NotEqual, fail_label); // NaN. a.emit_vcmpeqsd(reg, XMMOrMemory::XMM(reg), tmp_x); a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp)); a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - a.emit_jmp(Condition::Equal, trap); + a.emit_jmp(Condition::Equal, fail_label); + + a.emit_jmp(Condition::None, succeed_label); + + m.release_temp_xmm(tmp_x); + m.release_temp_gpr(tmp); + } + + // Checks for underflow/overflow/nan before IxxTrunc{U/S}F64. + fn emit_f64_int_conv_check_trap( + a: &mut Assembler, + m: &mut Machine, + reg: XMM, + lower_bound: f64, + upper_bound: f64, + ) { + let trap = a.get_label(); + let end = a.get_label(); - a.emit_jmp(Condition::None, end); + Self::emit_f64_int_conv_check(a, m, reg, lower_bound, upper_bound, trap, end); a.emit_label(trap); a.emit_ud2(); a.emit_label(end); + } - m.release_temp_xmm(tmp_x); - m.release_temp_gpr(tmp); + fn emit_f64_int_conv_check_zero( + a: &mut Assembler, + m: &mut Machine, + reg: XMM, + lower_bound: f64, + upper_bound: f64, + ) { + let trap = a.get_label(); + let end = a.get_label(); + + Self::emit_f64_int_conv_check(a, m, reg, lower_bound, upper_bound, trap, end); + a.emit_label(trap); + let gpr = m.acquire_temp_gpr().unwrap(); + a.emit_mov(Size::S64, Location::Imm64(0), Location::GPR(gpr)); + a.emit_mov(Size::S64, Location::GPR(gpr), Location::XMM(reg)); + m.release_temp_gpr(gpr); + a.emit_label(end); } pub fn get_state_diff( @@ -4557,7 +4621,13 @@ impl FunctionCodeGenerator for X64FunctionCode { loc, Location::XMM(tmp_in), ); - Self::emit_f32_int_conv_check(a, &mut self.machine, tmp_in, -1.0, 4294967296.0); + Self::emit_f32_int_conv_check_trap( + a, + &mut self.machine, + tmp_in, + -1.0, + 4294967296.0, + ); a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); a.emit_mov(Size::S32, Location::GPR(tmp_out), ret); @@ -4567,6 +4637,41 @@ impl FunctionCodeGenerator for X64FunctionCode { } } + Operator::I32TruncUSatF32 => { + let loc = + get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap()); + let ret = self.machine.acquire_locations( + a, + &[(WpType::I32, MachineValue::WasmStack(self.value_stack.len()))], + false, + )[0]; + self.value_stack.push(ret); + + let tmp_out = self.machine.acquire_temp_gpr().unwrap(); + let tmp_in = self.machine.acquire_temp_xmm().unwrap(); + Self::emit_relaxed_binop( + a, + &mut self.machine, + Assembler::emit_mov, + Size::S32, + loc, + Location::XMM(tmp_in), + ); + Self::emit_f32_int_conv_check_zero( + a, + &mut self.machine, + tmp_in, + -1.0, + 4294967296.0, + ); + + a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + a.emit_mov(Size::S32, Location::GPR(tmp_out), ret); + + self.machine.release_temp_xmm(tmp_in); + self.machine.release_temp_gpr(tmp_out); + } + Operator::I32TruncSF32 => { let loc = get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap()); @@ -4611,7 +4716,7 @@ impl FunctionCodeGenerator for X64FunctionCode { loc, Location::XMM(tmp_in), ); - Self::emit_f32_int_conv_check( + Self::emit_f32_int_conv_check_trap( a, &mut self.machine, tmp_in, @@ -4626,6 +4731,41 @@ impl FunctionCodeGenerator for X64FunctionCode { self.machine.release_temp_gpr(tmp_out); } } + 
Operator::I32TruncSSatF32 => { + let loc = + get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap()); + let ret = self.machine.acquire_locations( + a, + &[(WpType::I32, MachineValue::WasmStack(self.value_stack.len()))], + false, + )[0]; + self.value_stack.push(ret); + + let tmp_out = self.machine.acquire_temp_gpr().unwrap(); + let tmp_in = self.machine.acquire_temp_xmm().unwrap(); + + Self::emit_relaxed_binop( + a, + &mut self.machine, + Assembler::emit_mov, + Size::S32, + loc, + Location::XMM(tmp_in), + ); + Self::emit_f32_int_conv_check_zero( + a, + &mut self.machine, + tmp_in, + -2147483904.0, + 2147483648.0, + ); + + a.emit_cvttss2si_32(XMMOrMemory::XMM(tmp_in), tmp_out); + a.emit_mov(Size::S32, Location::GPR(tmp_out), ret); + + self.machine.release_temp_xmm(tmp_in); + self.machine.release_temp_gpr(tmp_out); + } Operator::I64TruncSF32 => { let loc = @@ -4671,7 +4811,7 @@ impl FunctionCodeGenerator for X64FunctionCode { loc, Location::XMM(tmp_in), ); - Self::emit_f32_int_conv_check( + Self::emit_f32_int_conv_check_trap( a, &mut self.machine, tmp_in, @@ -4686,6 +4826,41 @@ impl FunctionCodeGenerator for X64FunctionCode { } } + Operator::I64TruncSSatF32 => { + let loc = + get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap()); + let ret = self.machine.acquire_locations( + a, + &[(WpType::I64, MachineValue::WasmStack(self.value_stack.len()))], + false, + )[0]; + self.value_stack.push(ret); + + let tmp_out = self.machine.acquire_temp_gpr().unwrap(); + let tmp_in = self.machine.acquire_temp_xmm().unwrap(); + + Self::emit_relaxed_binop( + a, + &mut self.machine, + Assembler::emit_mov, + Size::S32, + loc, + Location::XMM(tmp_in), + ); + Self::emit_f32_int_conv_check_zero( + a, + &mut self.machine, + tmp_in, + -9223373136366403584.0, + 9223372036854775808.0, + ); + a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + a.emit_mov(Size::S64, Location::GPR(tmp_out), ret); + + self.machine.release_temp_xmm(tmp_in); + self.machine.release_temp_gpr(tmp_out); + } + Operator::I64TruncUF32 => { let loc = get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap()); @@ -4730,7 +4905,7 @@ impl FunctionCodeGenerator for X64FunctionCode { loc, Location::XMM(tmp_in), ); - Self::emit_f32_int_conv_check( + Self::emit_f32_int_conv_check_trap( a, &mut self.machine, tmp_in, @@ -4769,6 +4944,65 @@ impl FunctionCodeGenerator for X64FunctionCode { self.machine.release_temp_gpr(tmp_out); } } + Operator::I64TruncUSatF32 => { + let loc = + get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap()); + let ret = self.machine.acquire_locations( + a, + &[(WpType::I64, MachineValue::WasmStack(self.value_stack.len()))], + false, + )[0]; + self.value_stack.push(ret); + + let tmp_out = self.machine.acquire_temp_gpr().unwrap(); + let tmp_in = self.machine.acquire_temp_xmm().unwrap(); // xmm2 + + Self::emit_relaxed_binop( + a, + &mut self.machine, + Assembler::emit_mov, + Size::S32, + loc, + Location::XMM(tmp_in), + ); + Self::emit_f32_int_conv_check_zero( + a, + &mut self.machine, + tmp_in, + -1.0, + 18446744073709551616.0, + ); + + let tmp = self.machine.acquire_temp_gpr().unwrap(); // r15 + let tmp_x1 = self.machine.acquire_temp_xmm().unwrap(); // xmm1 + let tmp_x2 = self.machine.acquire_temp_xmm().unwrap(); // xmm3 + + a.emit_mov( + Size::S32, + Location::Imm32(1593835520u32), + Location::GPR(tmp), + ); //float 9.22337203E+18 + a.emit_mov(Size::S32, Location::GPR(tmp), Location::XMM(tmp_x1)); + a.emit_mov(Size::S32, Location::XMM(tmp_in), 
Location::XMM(tmp_x2)); + a.emit_vsubss(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); + a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + a.emit_mov( + Size::S64, + Location::Imm64(0x8000000000000000u64), + Location::GPR(tmp), + ); + a.emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); + a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); + a.emit_ucomiss(XMMOrMemory::XMM(tmp_x1), tmp_x2); + a.emit_cmovae_gpr_64(tmp, tmp_out); + a.emit_mov(Size::S64, Location::GPR(tmp_out), ret); + + self.machine.release_temp_xmm(tmp_x2); + self.machine.release_temp_xmm(tmp_x1); + self.machine.release_temp_gpr(tmp); + self.machine.release_temp_xmm(tmp_in); + self.machine.release_temp_gpr(tmp_out); + } Operator::I32TruncUF64 => { let loc = @@ -4814,7 +5048,13 @@ impl FunctionCodeGenerator for X64FunctionCode { loc, Location::XMM(tmp_in), ); - Self::emit_f64_int_conv_check(a, &mut self.machine, tmp_in, -1.0, 4294967296.0); + Self::emit_f64_int_conv_check_trap( + a, + &mut self.machine, + tmp_in, + -1.0, + 4294967296.0, + ); a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); a.emit_mov(Size::S32, Location::GPR(tmp_out), ret); @@ -4824,6 +5064,42 @@ impl FunctionCodeGenerator for X64FunctionCode { } } + Operator::I32TruncUSatF64 => { + let loc = + get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap()); + let ret = self.machine.acquire_locations( + a, + &[(WpType::I32, MachineValue::WasmStack(self.value_stack.len()))], + false, + )[0]; + self.value_stack.push(ret); + + let tmp_out = self.machine.acquire_temp_gpr().unwrap(); + let tmp_in = self.machine.acquire_temp_xmm().unwrap(); + + Self::emit_relaxed_binop( + a, + &mut self.machine, + Assembler::emit_mov, + Size::S64, + loc, + Location::XMM(tmp_in), + ); + Self::emit_f64_int_conv_check_zero( + a, + &mut self.machine, + tmp_in, + -1.0, + 4294967296.0, + ); + + a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + a.emit_mov(Size::S32, Location::GPR(tmp_out), ret); + + self.machine.release_temp_xmm(tmp_in); + self.machine.release_temp_gpr(tmp_out); + } + Operator::I32TruncSF64 => { let loc = get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap()); @@ -4873,7 +5149,7 @@ impl FunctionCodeGenerator for X64FunctionCode { } }; - Self::emit_f64_int_conv_check( + Self::emit_f64_int_conv_check_trap( a, &mut self.machine, real_in, @@ -4889,6 +5165,47 @@ impl FunctionCodeGenerator for X64FunctionCode { } } + Operator::I32TruncSSatF64 => { + let loc = + get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap()); + let ret = self.machine.acquire_locations( + a, + &[(WpType::I32, MachineValue::WasmStack(self.value_stack.len()))], + false, + )[0]; + self.value_stack.push(ret); + + let tmp_out = self.machine.acquire_temp_gpr().unwrap(); + let tmp_in = self.machine.acquire_temp_xmm().unwrap(); + + let real_in = match loc { + Location::Imm32(_) | Location::Imm64(_) => { + a.emit_mov(Size::S64, loc, Location::GPR(tmp_out)); + a.emit_mov(Size::S64, Location::GPR(tmp_out), Location::XMM(tmp_in)); + tmp_in + } + Location::XMM(x) => x, + _ => { + a.emit_mov(Size::S64, loc, Location::XMM(tmp_in)); + tmp_in + } + }; + + Self::emit_f64_int_conv_check_zero( + a, + &mut self.machine, + real_in, + -2147483649.0, + 2147483648.0, + ); + + a.emit_cvttsd2si_32(XMMOrMemory::XMM(real_in), tmp_out); + a.emit_mov(Size::S32, Location::GPR(tmp_out), ret); + + self.machine.release_temp_xmm(tmp_in); + self.machine.release_temp_gpr(tmp_out); + } + Operator::I64TruncSF64 => { let loc = get_location_released(a, 
&mut self.machine, self.value_stack.pop().unwrap()); @@ -4933,7 +5250,7 @@ impl FunctionCodeGenerator for X64FunctionCode { loc, Location::XMM(tmp_in), ); - Self::emit_f64_int_conv_check( + Self::emit_f64_int_conv_check_trap( a, &mut self.machine, tmp_in, @@ -4949,6 +5266,42 @@ impl FunctionCodeGenerator for X64FunctionCode { } } + Operator::I64TruncSSatF64 => { + let loc = + get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap()); + let ret = self.machine.acquire_locations( + a, + &[(WpType::I64, MachineValue::WasmStack(self.value_stack.len()))], + false, + )[0]; + self.value_stack.push(ret); + + let tmp_out = self.machine.acquire_temp_gpr().unwrap(); + let tmp_in = self.machine.acquire_temp_xmm().unwrap(); + + Self::emit_relaxed_binop( + a, + &mut self.machine, + Assembler::emit_mov, + Size::S64, + loc, + Location::XMM(tmp_in), + ); + Self::emit_f64_int_conv_check_zero( + a, + &mut self.machine, + tmp_in, + -9223372036854777856.0, + 9223372036854775808.0, + ); + + a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + a.emit_mov(Size::S64, Location::GPR(tmp_out), ret); + + self.machine.release_temp_xmm(tmp_in); + self.machine.release_temp_gpr(tmp_out); + } + Operator::I64TruncUF64 => { let loc = get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap()); @@ -4993,7 +5346,7 @@ impl FunctionCodeGenerator for X64FunctionCode { loc, Location::XMM(tmp_in), ); - Self::emit_f64_int_conv_check( + Self::emit_f64_int_conv_check_trap( a, &mut self.machine, tmp_in, @@ -5033,6 +5386,66 @@ impl FunctionCodeGenerator for X64FunctionCode { } } + Operator::I64TruncUSatF64 => { + let loc = + get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap()); + let ret = self.machine.acquire_locations( + a, + &[(WpType::I64, MachineValue::WasmStack(self.value_stack.len()))], + false, + )[0]; + self.value_stack.push(ret); + + let tmp_out = self.machine.acquire_temp_gpr().unwrap(); + let tmp_in = self.machine.acquire_temp_xmm().unwrap(); // xmm2 + + Self::emit_relaxed_binop( + a, + &mut self.machine, + Assembler::emit_mov, + Size::S64, + loc, + Location::XMM(tmp_in), + ); + Self::emit_f64_int_conv_check_zero( + a, + &mut self.machine, + tmp_in, + -1.0, + 18446744073709551616.0, + ); + + let tmp = self.machine.acquire_temp_gpr().unwrap(); // r15 + let tmp_x1 = self.machine.acquire_temp_xmm().unwrap(); // xmm1 + let tmp_x2 = self.machine.acquire_temp_xmm().unwrap(); // xmm3 + + a.emit_mov( + Size::S64, + Location::Imm64(4890909195324358656u64), + Location::GPR(tmp), + ); //double 9.2233720368547758E+18 + a.emit_mov(Size::S64, Location::GPR(tmp), Location::XMM(tmp_x1)); + a.emit_mov(Size::S64, Location::XMM(tmp_in), Location::XMM(tmp_x2)); + a.emit_vsubsd(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); + a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + a.emit_mov( + Size::S64, + Location::Imm64(0x8000000000000000u64), + Location::GPR(tmp), + ); + a.emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); + a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); + a.emit_ucomisd(XMMOrMemory::XMM(tmp_x1), tmp_x2); + a.emit_cmovae_gpr_64(tmp, tmp_out); + a.emit_mov(Size::S64, Location::GPR(tmp_out), ret); + + self.machine.release_temp_xmm(tmp_x2); + self.machine.release_temp_xmm(tmp_x1); + self.machine.release_temp_gpr(tmp); + self.machine.release_temp_xmm(tmp_in); + self.machine.release_temp_gpr(tmp_out); + } + Operator::F32ConvertSI32 => { let loc = get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap()); From 
d52c193e84112f15eaa55861b7d7fa5870e61204 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Mon, 16 Dec 2019 14:29:56 -0800 Subject: [PATCH 05/12] Finish implementation of trunc_sat in singlepass x86-64. --- lib/singlepass-backend/src/codegen_x64.rs | 407 ++++++++++++++++------ 1 file changed, 309 insertions(+), 98 deletions(-) diff --git a/lib/singlepass-backend/src/codegen_x64.rs b/lib/singlepass-backend/src/codegen_x64.rs index a86d15c6c3c..42eab61ce22 100644 --- a/lib/singlepass-backend/src/codegen_x64.rs +++ b/lib/singlepass-backend/src/codegen_x64.rs @@ -2028,7 +2028,9 @@ impl X64FunctionCode { reg: XMM, lower_bound: f32, upper_bound: f32, - fail_label: ::Label, + underflow_label: ::Label, + overflow_label: ::Label, + nan_label: ::Label, succeed_label: ::Label, ) { let lower_bound = f32::to_bits(lower_bound); @@ -2043,7 +2045,7 @@ impl X64FunctionCode { a.emit_vcmpless(reg, XMMOrMemory::XMM(tmp_x), tmp_x); a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp)); a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - a.emit_jmp(Condition::NotEqual, fail_label); + a.emit_jmp(Condition::NotEqual, underflow_label); // Overflow. a.emit_mov(Size::S32, Location::Imm32(upper_bound), Location::GPR(tmp)); @@ -2051,13 +2053,13 @@ impl X64FunctionCode { a.emit_vcmpgess(reg, XMMOrMemory::XMM(tmp_x), tmp_x); a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp)); a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - a.emit_jmp(Condition::NotEqual, fail_label); + a.emit_jmp(Condition::NotEqual, overflow_label); // NaN. a.emit_vcmpeqss(reg, XMMOrMemory::XMM(reg), tmp_x); a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp)); a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - a.emit_jmp(Condition::Equal, fail_label); + a.emit_jmp(Condition::Equal, nan_label); a.emit_jmp(Condition::None, succeed_label); @@ -2076,28 +2078,70 @@ impl X64FunctionCode { let trap = a.get_label(); let end = a.get_label(); - Self::emit_f32_int_conv_check(a, m, reg, lower_bound, upper_bound, trap, end); + Self::emit_f32_int_conv_check(a, m, reg, lower_bound, upper_bound, trap, trap, trap, end); a.emit_label(trap); a.emit_ud2(); a.emit_label(end); } - fn emit_f32_int_conv_check_zero( + fn emit_f32_int_conv_check_sat< + F1: FnOnce(&mut Assembler, &mut Machine), + F2: FnOnce(&mut Assembler, &mut Machine), + F3: FnOnce(&mut Assembler, &mut Machine), + F4: FnOnce(&mut Assembler, &mut Machine), + >( a: &mut Assembler, m: &mut Machine, reg: XMM, lower_bound: f32, upper_bound: f32, + underflow_cb: F1, + overflow_cb: F2, + nan_cb: Option, + convert_cb: F4, ) { - let trap = a.get_label(); + // As an optimization nan_cb is optional, and when set to None we turn + // use 'underflow' as the 'nan' label. This is useful for callers who + // set the return value to zero for both underflow and nan. 
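+        // For example, the I32TruncUSatF32 lowering below passes an
+        // underflow_cb that writes 0 and an overflow_cb that writes
+        // std::u32::MAX into the result GPR, with nan_cb = None so NaN
+        // shares the zero-writing underflow path.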
+ + let underflow = a.get_label(); + let overflow = a.get_label(); + let nan = if nan_cb.is_some() { + a.get_label() + } else { + underflow + }; + let convert = a.get_label(); let end = a.get_label(); - Self::emit_f32_int_conv_check(a, m, reg, lower_bound, upper_bound, trap, end); - a.emit_label(trap); - let gpr = m.acquire_temp_gpr().unwrap(); - a.emit_mov(Size::S32, Location::Imm32(0), Location::GPR(gpr)); - a.emit_mov(Size::S32, Location::GPR(gpr), Location::XMM(reg)); - m.release_temp_gpr(gpr); + Self::emit_f32_int_conv_check( + a, + m, + reg, + lower_bound, + upper_bound, + underflow, + overflow, + nan, + convert, + ); + + a.emit_label(underflow); + underflow_cb(a, m); + a.emit_jmp(Condition::None, end); + + a.emit_label(overflow); + overflow_cb(a, m); + a.emit_jmp(Condition::None, end); + + if let Some(cb) = nan_cb { + a.emit_label(nan); + cb(a, m); + a.emit_jmp(Condition::None, end); + } + + a.emit_label(convert); + convert_cb(a, m); a.emit_label(end); } @@ -2108,7 +2152,9 @@ impl X64FunctionCode { reg: XMM, lower_bound: f64, upper_bound: f64, - fail_label: ::Label, + underflow_label: ::Label, + overflow_label: ::Label, + nan_label: ::Label, succeed_label: ::Label, ) { let lower_bound = f64::to_bits(lower_bound); @@ -2123,7 +2169,7 @@ impl X64FunctionCode { a.emit_vcmplesd(reg, XMMOrMemory::XMM(tmp_x), tmp_x); a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp)); a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - a.emit_jmp(Condition::NotEqual, fail_label); + a.emit_jmp(Condition::NotEqual, underflow_label); // Overflow. a.emit_mov(Size::S64, Location::Imm64(upper_bound), Location::GPR(tmp)); @@ -2131,13 +2177,13 @@ impl X64FunctionCode { a.emit_vcmpgesd(reg, XMMOrMemory::XMM(tmp_x), tmp_x); a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp)); a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - a.emit_jmp(Condition::NotEqual, fail_label); + a.emit_jmp(Condition::NotEqual, overflow_label); // NaN. a.emit_vcmpeqsd(reg, XMMOrMemory::XMM(reg), tmp_x); a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp)); a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - a.emit_jmp(Condition::Equal, fail_label); + a.emit_jmp(Condition::Equal, nan_label); a.emit_jmp(Condition::None, succeed_label); @@ -2156,28 +2202,70 @@ impl X64FunctionCode { let trap = a.get_label(); let end = a.get_label(); - Self::emit_f64_int_conv_check(a, m, reg, lower_bound, upper_bound, trap, end); + Self::emit_f64_int_conv_check(a, m, reg, lower_bound, upper_bound, trap, trap, trap, end); a.emit_label(trap); a.emit_ud2(); a.emit_label(end); } - fn emit_f64_int_conv_check_zero( + fn emit_f64_int_conv_check_sat< + F1: FnOnce(&mut Assembler, &mut Machine), + F2: FnOnce(&mut Assembler, &mut Machine), + F3: FnOnce(&mut Assembler, &mut Machine), + F4: FnOnce(&mut Assembler, &mut Machine), + >( a: &mut Assembler, m: &mut Machine, reg: XMM, lower_bound: f64, upper_bound: f64, + underflow_cb: F1, + overflow_cb: F2, + nan_cb: Option, + convert_cb: F4, ) { - let trap = a.get_label(); + // As an optimization nan_cb is optional, and when set to None we turn + // use 'underflow' as the 'nan' label. This is useful for callers who + // set the return value to zero for both underflow and nan. 
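+        // For example, the I64TruncSSatF64 lowering below passes an
+        // underflow_cb that writes std::i64::MIN, an overflow_cb that
+        // writes std::i64::MAX, and Some(nan_cb) that writes 0 so NaN
+        // gets its own saturation value.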
+ + let underflow = a.get_label(); + let overflow = a.get_label(); + let nan = if nan_cb.is_some() { + a.get_label() + } else { + underflow + }; + let convert = a.get_label(); let end = a.get_label(); - Self::emit_f64_int_conv_check(a, m, reg, lower_bound, upper_bound, trap, end); - a.emit_label(trap); - let gpr = m.acquire_temp_gpr().unwrap(); - a.emit_mov(Size::S64, Location::Imm64(0), Location::GPR(gpr)); - a.emit_mov(Size::S64, Location::GPR(gpr), Location::XMM(reg)); - m.release_temp_gpr(gpr); + Self::emit_f64_int_conv_check( + a, + m, + reg, + lower_bound, + upper_bound, + underflow, + overflow, + nan, + convert, + ); + + a.emit_label(underflow); + underflow_cb(a, m); + a.emit_jmp(Condition::None, end); + + a.emit_label(overflow); + overflow_cb(a, m); + a.emit_jmp(Condition::None, end); + + if let Some(cb) = nan_cb { + a.emit_label(nan); + cb(a, m); + a.emit_jmp(Condition::None, end); + } + + a.emit_label(convert); + convert_cb(a, m); a.emit_label(end); } @@ -4657,17 +4745,29 @@ impl FunctionCodeGenerator for X64FunctionCode { loc, Location::XMM(tmp_in), ); - Self::emit_f32_int_conv_check_zero( + Self::emit_f32_int_conv_check_sat( a, &mut self.machine, tmp_in, -1.0, 4294967296.0, + |a, _m| { + a.emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)); + }, + |a, _m| { + a.emit_mov( + Size::S32, + Location::Imm32(std::u32::MAX), + Location::GPR(tmp_out), + ); + }, + None::, + |a, _m| { + a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + }, ); - a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); a.emit_mov(Size::S32, Location::GPR(tmp_out), ret); - self.machine.release_temp_xmm(tmp_in); self.machine.release_temp_gpr(tmp_out); } @@ -4752,17 +4852,35 @@ impl FunctionCodeGenerator for X64FunctionCode { loc, Location::XMM(tmp_in), ); - Self::emit_f32_int_conv_check_zero( + Self::emit_f32_int_conv_check_sat( a, &mut self.machine, tmp_in, -2147483904.0, 2147483648.0, + |a, _m| { + a.emit_mov( + Size::S32, + Location::Imm32(std::i32::MIN as u32), + Location::GPR(tmp_out), + ); + }, + |a, _m| { + a.emit_mov( + Size::S32, + Location::Imm32(std::i32::MAX as u32), + Location::GPR(tmp_out), + ); + }, + Some(|a: &mut Assembler, _m: &mut Machine| { + a.emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)); + }), + |a, _m| { + a.emit_cvttss2si_32(XMMOrMemory::XMM(tmp_in), tmp_out); + }, ); - a.emit_cvttss2si_32(XMMOrMemory::XMM(tmp_in), tmp_out); a.emit_mov(Size::S32, Location::GPR(tmp_out), ret); - self.machine.release_temp_xmm(tmp_in); self.machine.release_temp_gpr(tmp_out); } @@ -4847,16 +4965,35 @@ impl FunctionCodeGenerator for X64FunctionCode { loc, Location::XMM(tmp_in), ); - Self::emit_f32_int_conv_check_zero( + Self::emit_f32_int_conv_check_sat( a, &mut self.machine, tmp_in, -9223373136366403584.0, 9223372036854775808.0, + |a, _m| { + a.emit_mov( + Size::S64, + Location::Imm64(std::i64::MIN as u64), + Location::GPR(tmp_out), + ); + }, + |a, _m| { + a.emit_mov( + Size::S64, + Location::Imm64(std::i64::MAX as u64), + Location::GPR(tmp_out), + ); + }, + Some(|a: &mut Assembler, _m: &mut Machine| { + a.emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)); + }), + |a, _m| { + a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + }, ); - a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); - a.emit_mov(Size::S64, Location::GPR(tmp_out), ret); + a.emit_mov(Size::S64, Location::GPR(tmp_out), ret); self.machine.release_temp_xmm(tmp_in); self.machine.release_temp_gpr(tmp_out); } @@ -4965,41 +5102,54 @@ impl FunctionCodeGenerator for X64FunctionCode { 
loc, Location::XMM(tmp_in), ); - Self::emit_f32_int_conv_check_zero( + Self::emit_f32_int_conv_check_sat( a, &mut self.machine, tmp_in, -1.0, 18446744073709551616.0, - ); - - let tmp = self.machine.acquire_temp_gpr().unwrap(); // r15 - let tmp_x1 = self.machine.acquire_temp_xmm().unwrap(); // xmm1 - let tmp_x2 = self.machine.acquire_temp_xmm().unwrap(); // xmm3 + |a, _m| { + a.emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)); + }, + |a, _m| { + a.emit_mov( + Size::S64, + Location::Imm64(std::u64::MAX), + Location::GPR(tmp_out), + ); + }, + None::, + |a, m| { + let tmp = m.acquire_temp_gpr().unwrap(); // r15 + let tmp_x1 = m.acquire_temp_xmm().unwrap(); // xmm1 + let tmp_x2 = m.acquire_temp_xmm().unwrap(); // xmm3 - a.emit_mov( - Size::S32, - Location::Imm32(1593835520u32), - Location::GPR(tmp), - ); //float 9.22337203E+18 - a.emit_mov(Size::S32, Location::GPR(tmp), Location::XMM(tmp_x1)); - a.emit_mov(Size::S32, Location::XMM(tmp_in), Location::XMM(tmp_x2)); - a.emit_vsubss(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); - a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); - a.emit_mov( - Size::S64, - Location::Imm64(0x8000000000000000u64), - Location::GPR(tmp), + a.emit_mov( + Size::S32, + Location::Imm32(1593835520u32), + Location::GPR(tmp), + ); //float 9.22337203E+18 + a.emit_mov(Size::S32, Location::GPR(tmp), Location::XMM(tmp_x1)); + a.emit_mov(Size::S32, Location::XMM(tmp_in), Location::XMM(tmp_x2)); + a.emit_vsubss(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); + a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + a.emit_mov( + Size::S64, + Location::Imm64(0x8000000000000000u64), + Location::GPR(tmp), + ); + a.emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); + a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); + a.emit_ucomiss(XMMOrMemory::XMM(tmp_x1), tmp_x2); + a.emit_cmovae_gpr_64(tmp, tmp_out); + + m.release_temp_xmm(tmp_x2); + m.release_temp_xmm(tmp_x1); + m.release_temp_gpr(tmp); + }, ); - a.emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); - a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); - a.emit_ucomiss(XMMOrMemory::XMM(tmp_x1), tmp_x2); - a.emit_cmovae_gpr_64(tmp, tmp_out); - a.emit_mov(Size::S64, Location::GPR(tmp_out), ret); - self.machine.release_temp_xmm(tmp_x2); - self.machine.release_temp_xmm(tmp_x1); - self.machine.release_temp_gpr(tmp); + a.emit_mov(Size::S64, Location::GPR(tmp_out), ret); self.machine.release_temp_xmm(tmp_in); self.machine.release_temp_gpr(tmp_out); } @@ -5085,17 +5235,29 @@ impl FunctionCodeGenerator for X64FunctionCode { loc, Location::XMM(tmp_in), ); - Self::emit_f64_int_conv_check_zero( + Self::emit_f64_int_conv_check_sat( a, &mut self.machine, tmp_in, -1.0, 4294967296.0, + |a, _m| { + a.emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)); + }, + |a, _m| { + a.emit_mov( + Size::S32, + Location::Imm32(std::u32::MAX), + Location::GPR(tmp_out), + ); + }, + None::, + |a, _m| { + a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + }, ); - a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); a.emit_mov(Size::S32, Location::GPR(tmp_out), ret); - self.machine.release_temp_xmm(tmp_in); self.machine.release_temp_gpr(tmp_out); } @@ -5191,17 +5353,35 @@ impl FunctionCodeGenerator for X64FunctionCode { } }; - Self::emit_f64_int_conv_check_zero( + Self::emit_f64_int_conv_check_sat( a, &mut self.machine, real_in, -2147483649.0, 2147483648.0, + |a, _m| { + a.emit_mov( + Size::S32, + Location::Imm32(std::i32::MIN as u32), + Location::GPR(tmp_out), + ); + }, + |a, _m| { + 
a.emit_mov( + Size::S32, + Location::Imm32(std::i32::MAX as u32), + Location::GPR(tmp_out), + ); + }, + Some(|a: &mut Assembler, _m: &mut Machine| { + a.emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)); + }), + |a, _m| { + a.emit_cvttsd2si_32(XMMOrMemory::XMM(real_in), tmp_out); + }, ); - a.emit_cvttsd2si_32(XMMOrMemory::XMM(real_in), tmp_out); a.emit_mov(Size::S32, Location::GPR(tmp_out), ret); - self.machine.release_temp_xmm(tmp_in); self.machine.release_temp_gpr(tmp_out); } @@ -5287,17 +5467,35 @@ impl FunctionCodeGenerator for X64FunctionCode { loc, Location::XMM(tmp_in), ); - Self::emit_f64_int_conv_check_zero( + Self::emit_f64_int_conv_check_sat( a, &mut self.machine, tmp_in, -9223372036854777856.0, 9223372036854775808.0, + |a, _m| { + a.emit_mov( + Size::S64, + Location::Imm64(std::i64::MIN as u64), + Location::GPR(tmp_out), + ); + }, + |a, _m| { + a.emit_mov( + Size::S64, + Location::Imm64(std::i64::MAX as u64), + Location::GPR(tmp_out), + ); + }, + Some(|a: &mut Assembler, _m: &mut Machine| { + a.emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)); + }), + |a, _m| { + a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + }, ); - a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); a.emit_mov(Size::S64, Location::GPR(tmp_out), ret); - self.machine.release_temp_xmm(tmp_in); self.machine.release_temp_gpr(tmp_out); } @@ -5407,41 +5605,54 @@ impl FunctionCodeGenerator for X64FunctionCode { loc, Location::XMM(tmp_in), ); - Self::emit_f64_int_conv_check_zero( + Self::emit_f64_int_conv_check_sat( a, &mut self.machine, tmp_in, -1.0, 18446744073709551616.0, - ); - - let tmp = self.machine.acquire_temp_gpr().unwrap(); // r15 - let tmp_x1 = self.machine.acquire_temp_xmm().unwrap(); // xmm1 - let tmp_x2 = self.machine.acquire_temp_xmm().unwrap(); // xmm3 + |a, _m| { + a.emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)); + }, + |a, _m| { + a.emit_mov( + Size::S64, + Location::Imm64(std::u64::MAX), + Location::GPR(tmp_out), + ); + }, + None::, + |a, m| { + let tmp = m.acquire_temp_gpr().unwrap(); // r15 + let tmp_x1 = m.acquire_temp_xmm().unwrap(); // xmm1 + let tmp_x2 = m.acquire_temp_xmm().unwrap(); // xmm3 - a.emit_mov( - Size::S64, - Location::Imm64(4890909195324358656u64), - Location::GPR(tmp), - ); //double 9.2233720368547758E+18 - a.emit_mov(Size::S64, Location::GPR(tmp), Location::XMM(tmp_x1)); - a.emit_mov(Size::S64, Location::XMM(tmp_in), Location::XMM(tmp_x2)); - a.emit_vsubsd(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); - a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); - a.emit_mov( - Size::S64, - Location::Imm64(0x8000000000000000u64), - Location::GPR(tmp), + a.emit_mov( + Size::S64, + Location::Imm64(4890909195324358656u64), + Location::GPR(tmp), + ); //double 9.2233720368547758E+18 + a.emit_mov(Size::S64, Location::GPR(tmp), Location::XMM(tmp_x1)); + a.emit_mov(Size::S64, Location::XMM(tmp_in), Location::XMM(tmp_x2)); + a.emit_vsubsd(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); + a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + a.emit_mov( + Size::S64, + Location::Imm64(0x8000000000000000u64), + Location::GPR(tmp), + ); + a.emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); + a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); + a.emit_ucomisd(XMMOrMemory::XMM(tmp_x1), tmp_x2); + a.emit_cmovae_gpr_64(tmp, tmp_out); + + m.release_temp_xmm(tmp_x2); + m.release_temp_xmm(tmp_x1); + m.release_temp_gpr(tmp); + }, ); - a.emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); - 
a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); - a.emit_ucomisd(XMMOrMemory::XMM(tmp_x1), tmp_x2); - a.emit_cmovae_gpr_64(tmp, tmp_out); - a.emit_mov(Size::S64, Location::GPR(tmp_out), ret); - self.machine.release_temp_xmm(tmp_x2); - self.machine.release_temp_xmm(tmp_x1); - self.machine.release_temp_gpr(tmp); + a.emit_mov(Size::S64, Location::GPR(tmp_out), ret); self.machine.release_temp_xmm(tmp_in); self.machine.release_temp_gpr(tmp_out); } From 32ed6f2c1051ac383e2c8b73eff0b1281741a535 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Mon, 16 Dec 2019 14:30:14 -0800 Subject: [PATCH 06/12] Enable non-trapping float to int conversions by default. --- src/bin/wasmer.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/bin/wasmer.rs b/src/bin/wasmer.rs index 84b4da4e686..ea7d032039e 100644 --- a/src/bin/wasmer.rs +++ b/src/bin/wasmer.rs @@ -102,6 +102,7 @@ impl PrestandardFeatures { features.enable_threads(); } features.enable_sign_extension(); + features.enable_sat_float_to_int(); features } From b7929e65618e419a204984c302fd8c170106b098 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Mon, 16 Dec 2019 15:15:49 -0800 Subject: [PATCH 07/12] Add support for non-trapping float to int conversions in singlepass+AArch64. --- lib/singlepass-backend/src/codegen_x64.rs | 144 +++++++++++++--------- 1 file changed, 88 insertions(+), 56 deletions(-) diff --git a/lib/singlepass-backend/src/codegen_x64.rs b/lib/singlepass-backend/src/codegen_x64.rs index 42eab61ce22..797359bec93 100644 --- a/lib/singlepass-backend/src/codegen_x64.rs +++ b/lib/singlepass-backend/src/codegen_x64.rs @@ -4763,7 +4763,11 @@ impl FunctionCodeGenerator for X64FunctionCode { }, None::, |a, _m| { - a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + if a.arch_has_itruncf() { + a.arch_emit_i32_trunc_uf32(tmp_in, tmp_out); + } else { + a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + } }, ); @@ -4876,7 +4880,11 @@ impl FunctionCodeGenerator for X64FunctionCode { a.emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)); }), |a, _m| { - a.emit_cvttss2si_32(XMMOrMemory::XMM(tmp_in), tmp_out); + if a.arch_has_itruncf() { + a.arch_emit_i32_trunc_sf32(tmp_in, tmp_out); + } else { + a.emit_cvttss2si_32(XMMOrMemory::XMM(tmp_in), tmp_out); + } }, ); @@ -4989,7 +4997,11 @@ impl FunctionCodeGenerator for X64FunctionCode { a.emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)); }), |a, _m| { - a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + if a.arch_has_itruncf() { + a.arch_emit_i64_trunc_sf32(tmp_in, tmp_out); + } else { + a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + } }, ); @@ -5120,32 +5132,36 @@ impl FunctionCodeGenerator for X64FunctionCode { }, None::, |a, m| { - let tmp = m.acquire_temp_gpr().unwrap(); // r15 - let tmp_x1 = m.acquire_temp_xmm().unwrap(); // xmm1 - let tmp_x2 = m.acquire_temp_xmm().unwrap(); // xmm3 + if a.arch_has_itruncf() { + a.arch_emit_i64_trunc_uf32(tmp_in, tmp_out); + } else { + let tmp = m.acquire_temp_gpr().unwrap(); // r15 + let tmp_x1 = m.acquire_temp_xmm().unwrap(); // xmm1 + let tmp_x2 = m.acquire_temp_xmm().unwrap(); // xmm3 - a.emit_mov( - Size::S32, - Location::Imm32(1593835520u32), - Location::GPR(tmp), - ); //float 9.22337203E+18 - a.emit_mov(Size::S32, Location::GPR(tmp), Location::XMM(tmp_x1)); - a.emit_mov(Size::S32, Location::XMM(tmp_in), Location::XMM(tmp_x2)); - a.emit_vsubss(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); - a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); - a.emit_mov( - Size::S64, - 
Location::Imm64(0x8000000000000000u64), - Location::GPR(tmp), - ); - a.emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); - a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); - a.emit_ucomiss(XMMOrMemory::XMM(tmp_x1), tmp_x2); - a.emit_cmovae_gpr_64(tmp, tmp_out); - - m.release_temp_xmm(tmp_x2); - m.release_temp_xmm(tmp_x1); - m.release_temp_gpr(tmp); + a.emit_mov( + Size::S32, + Location::Imm32(1593835520u32), + Location::GPR(tmp), + ); //float 9.22337203E+18 + a.emit_mov(Size::S32, Location::GPR(tmp), Location::XMM(tmp_x1)); + a.emit_mov(Size::S32, Location::XMM(tmp_in), Location::XMM(tmp_x2)); + a.emit_vsubss(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); + a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + a.emit_mov( + Size::S64, + Location::Imm64(0x8000000000000000u64), + Location::GPR(tmp), + ); + a.emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); + a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); + a.emit_ucomiss(XMMOrMemory::XMM(tmp_x1), tmp_x2); + a.emit_cmovae_gpr_64(tmp, tmp_out); + + m.release_temp_xmm(tmp_x2); + m.release_temp_xmm(tmp_x1); + m.release_temp_gpr(tmp); + } }, ); @@ -5253,7 +5269,11 @@ impl FunctionCodeGenerator for X64FunctionCode { }, None::, |a, _m| { - a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + if a.arch_has_itruncf() { + a.arch_emit_i32_trunc_uf64(tmp_in, tmp_out); + } else { + a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + } }, ); @@ -5377,7 +5397,11 @@ impl FunctionCodeGenerator for X64FunctionCode { a.emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)); }), |a, _m| { - a.emit_cvttsd2si_32(XMMOrMemory::XMM(real_in), tmp_out); + if a.arch_has_itruncf() { + a.arch_emit_i32_trunc_sf64(tmp_in, tmp_out); + } else { + a.emit_cvttsd2si_32(XMMOrMemory::XMM(real_in), tmp_out); + } }, ); @@ -5491,7 +5515,11 @@ impl FunctionCodeGenerator for X64FunctionCode { a.emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)); }), |a, _m| { - a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + if a.arch_has_itruncf() { + a.arch_emit_i64_trunc_sf64(tmp_in, tmp_out); + } else { + a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + } }, ); @@ -5623,32 +5651,36 @@ impl FunctionCodeGenerator for X64FunctionCode { }, None::, |a, m| { - let tmp = m.acquire_temp_gpr().unwrap(); // r15 - let tmp_x1 = m.acquire_temp_xmm().unwrap(); // xmm1 - let tmp_x2 = m.acquire_temp_xmm().unwrap(); // xmm3 + if a.arch_has_itruncf() { + a.arch_emit_i64_trunc_uf64(tmp_in, tmp_out); + } else { + let tmp = m.acquire_temp_gpr().unwrap(); // r15 + let tmp_x1 = m.acquire_temp_xmm().unwrap(); // xmm1 + let tmp_x2 = m.acquire_temp_xmm().unwrap(); // xmm3 - a.emit_mov( - Size::S64, - Location::Imm64(4890909195324358656u64), - Location::GPR(tmp), - ); //double 9.2233720368547758E+18 - a.emit_mov(Size::S64, Location::GPR(tmp), Location::XMM(tmp_x1)); - a.emit_mov(Size::S64, Location::XMM(tmp_in), Location::XMM(tmp_x2)); - a.emit_vsubsd(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); - a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); - a.emit_mov( - Size::S64, - Location::Imm64(0x8000000000000000u64), - Location::GPR(tmp), - ); - a.emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); - a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); - a.emit_ucomisd(XMMOrMemory::XMM(tmp_x1), tmp_x2); - a.emit_cmovae_gpr_64(tmp, tmp_out); - - m.release_temp_xmm(tmp_x2); - m.release_temp_xmm(tmp_x1); - m.release_temp_gpr(tmp); + a.emit_mov( + Size::S64, + Location::Imm64(4890909195324358656u64), + 
Location::GPR(tmp), + ); //double 9.2233720368547758E+18 + a.emit_mov(Size::S64, Location::GPR(tmp), Location::XMM(tmp_x1)); + a.emit_mov(Size::S64, Location::XMM(tmp_in), Location::XMM(tmp_x2)); + a.emit_vsubsd(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); + a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + a.emit_mov( + Size::S64, + Location::Imm64(0x8000000000000000u64), + Location::GPR(tmp), + ); + a.emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); + a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); + a.emit_ucomisd(XMMOrMemory::XMM(tmp_x1), tmp_x2); + a.emit_cmovae_gpr_64(tmp, tmp_out); + + m.release_temp_xmm(tmp_x2); + m.release_temp_xmm(tmp_x1); + m.release_temp_gpr(tmp); + } }, ); From f8d792cf04bb5376db458e23aead2334dcf026e3 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Mon, 16 Dec 2019 15:33:47 -0800 Subject: [PATCH 08/12] Add changelog entry. --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b3f2065768c..4ce893f60a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ Special thanks to [@ethanfrey](https://github.com/ethanfrey), [@AdamSLevy](https - [#1060](https://github.com/wasmerio/wasmer/pull/1060) Test the capi with all the backends - [#1069](https://github.com/wasmerio/wasmer/pull/1069) Add function `get_memory_and_data` to `Ctx` to help prevent undefined behavior and mutable aliasing. It allows accessing memory while borrowing data mutably for the `Ctx` lifetime. This new function is now being used in `wasmer-wasi`. - [#1058](https://github.com/wasmerio/wasmer/pull/1058) Fix minor panic issue when `wasmer::compile_with` called with llvm backend. +- [#1071](https://github.com/wasmerio/wasmer/pull/1071) Add support for non-trapping float-to-int conversions, enabled by default. - [#858](https://github.com/wasmerio/wasmer/pull/858) Minor panic fix when wasmer binary with `loader` option run a module without exported `_start` function. - [#1056](https://github.com/wasmerio/wasmer/pull/1056) Improved `--invoke` args parsing (supporting `i32`, `i64`, `f32` and `f32`) in Wasmer CLI - [#1054](https://github.com/wasmerio/wasmer/pull/1054) Improve `--invoke` output in Wasmer CLI From 56fd66403c3e9ce4c9e91b2e826e59526f289762 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Fri, 20 Dec 2019 12:59:59 -0800 Subject: [PATCH 09/12] Update changelog again, to move entry to new unreleased section since there's been a release. --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ce893f60a4..ee435cfb385 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## **[Unreleased]** - [#1092](https://github.com/wasmerio/wasmer/pull/1092) Add `get_utf8_string_with_nul` to `WasmPtr` to read nul-terminated strings from memory. +- [#1071](https://github.com/wasmerio/wasmer/pull/1071) Add support for non-trapping float-to-int conversions, enabled by default. ## 0.12.0 - 2019-12-18 @@ -14,7 +15,6 @@ Special thanks to [@ethanfrey](https://github.com/ethanfrey), [@AdamSLevy](https - [#1060](https://github.com/wasmerio/wasmer/pull/1060) Test the capi with all the backends - [#1069](https://github.com/wasmerio/wasmer/pull/1069) Add function `get_memory_and_data` to `Ctx` to help prevent undefined behavior and mutable aliasing. It allows accessing memory while borrowing data mutably for the `Ctx` lifetime. This new function is now being used in `wasmer-wasi`. 
- [#1058](https://github.com/wasmerio/wasmer/pull/1058) Fix minor panic issue when `wasmer::compile_with` called with llvm backend. -- [#1071](https://github.com/wasmerio/wasmer/pull/1071) Add support for non-trapping float-to-int conversions, enabled by default. - [#858](https://github.com/wasmerio/wasmer/pull/858) Minor panic fix when wasmer binary with `loader` option run a module without exported `_start` function. - [#1056](https://github.com/wasmerio/wasmer/pull/1056) Improved `--invoke` args parsing (supporting `i32`, `i64`, `f32` and `f32`) in Wasmer CLI - [#1054](https://github.com/wasmerio/wasmer/pull/1054) Improve `--invoke` output in Wasmer CLI From f00283ab030bcd579d2ffa73c9212a1019492c8a Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Fri, 20 Dec 2019 13:00:36 -0800 Subject: [PATCH 10/12] Name the magic constants in the LLVM backend. --- lib/llvm-backend/src/code.rs | 71 ++++++++++++++++++++++++++++-------- 1 file changed, 55 insertions(+), 16 deletions(-) diff --git a/lib/llvm-backend/src/code.rs b/lib/llvm-backend/src/code.rs index a566b5a27b2..14982546d93 100644 --- a/lib/llvm-backend/src/code.rs +++ b/lib/llvm-backend/src/code.rs @@ -4556,8 +4556,8 @@ impl<'ctx> FunctionCodeGenerator for LLVMFunctionCodeGenerator<'ct let res = trunc_sat_scalar( builder, intrinsics.i32_ty, - std::i32::MIN as u64, - 2147483520u64, // bits as f32: 0x4effffff + LEF32_GEQ_I32_MIN, + GEF32_LEQ_I32_MAX, std::i32::MIN as u32 as u64, std::i32::MAX as u32 as u64, v, @@ -4572,8 +4572,8 @@ impl<'ctx> FunctionCodeGenerator for LLVMFunctionCodeGenerator<'ct let res = trunc_sat_scalar( builder, intrinsics.i32_ty, - std::i32::MIN as u64, - std::i32::MAX as u64, + LEF64_GEQ_I32_MIN, + GEF64_LEQ_I32_MAX, std::i32::MIN as u64, std::i32::MAX as u64, v, @@ -4615,8 +4615,8 @@ impl<'ctx> FunctionCodeGenerator for LLVMFunctionCodeGenerator<'ct let res = trunc_sat_scalar( builder, intrinsics.i64_ty, - std::i64::MIN as u64, - 9223371487098961920, // bits as f32: 0x5eff_ffff + LEF32_GEQ_I64_MIN, + GEF32_LEQ_I64_MAX, std::i64::MIN as u64, std::i64::MAX as u64, v, @@ -4631,8 +4631,8 @@ impl<'ctx> FunctionCodeGenerator for LLVMFunctionCodeGenerator<'ct let res = trunc_sat_scalar( builder, intrinsics.i64_ty, - std::i64::MIN as u64, - 9223372036854774784, // bits as f64: 0x43df_ffff_ffff_ffff + LEF64_GEQ_I64_MIN, + GEF64_LEQ_I64_MAX, std::i64::MIN as u64, std::i64::MAX as u64, v, @@ -4673,8 +4673,8 @@ impl<'ctx> FunctionCodeGenerator for LLVMFunctionCodeGenerator<'ct let res = trunc_sat_scalar( builder, intrinsics.i32_ty, - std::u32::MIN as u64, - 4294967040, // bits as f32: 0x4f7fffff + LEF32_GEQ_U32_MIN, + GEF32_LEQ_U32_MAX, std::u32::MIN as u64, std::u32::MAX as u64, v, @@ -4689,8 +4689,8 @@ impl<'ctx> FunctionCodeGenerator for LLVMFunctionCodeGenerator<'ct let res = trunc_sat_scalar( builder, intrinsics.i32_ty, - std::u32::MIN as u64, - 4294967295, // bits as f64: 0x41efffffffffffff + LEF64_GEQ_U32_MIN, + GEF64_LEQ_U32_MAX, std::u32::MIN as u64, std::u32::MAX as u64, v, @@ -4731,8 +4731,8 @@ impl<'ctx> FunctionCodeGenerator for LLVMFunctionCodeGenerator<'ct let res = trunc_sat_scalar( builder, intrinsics.i64_ty, - std::u64::MIN, - 18446742974197923840, // bits as f32: 0x5f7fffff + LEF32_GEQ_U64_MIN, + GEF32_LEQ_U64_MAX, std::u64::MIN, std::u64::MAX, v, @@ -4747,8 +4747,8 @@ impl<'ctx> FunctionCodeGenerator for LLVMFunctionCodeGenerator<'ct let res = trunc_sat_scalar( builder, intrinsics.i64_ty, - std::u64::MIN, - 18446744073709549568u64, // bits as f64: 0x43EFFFFFFFFFFFFF + LEF64_GEQ_U64_MIN, + GEF64_LEQ_U64_MAX, 
std::u64::MIN, std::u64::MAX, v, @@ -8987,3 +8987,42 @@ fn is_f64_arithmetic(bits: u64) -> bool { let bits = bits & 0x7FFF_FFFF_FFFF_FFFF; bits < 0x7FF8_0000_0000_0000 } + +// Constants for the bounds of truncation operations. These are the least or +// greatest exact floats in either f32 or f64 representation +// greater-than-or-equal-to (for least) or less-than-or-equal-to (for greatest) +// the i32 or i64 or u32 or u64 min (for least) or max (for greatest), when +// rounding towards zero. + +/// Least Exact Float (32 bits) greater-than-or-equal-to i32::MIN when rounding towards zero. +const LEF32_GEQ_I32_MIN: u64 = std::i32::MIN as u64; +/// Greatest Exact Float (32 bits) less-than-or-equal-to i32::MAX when rounding towards zero. +const GEF32_LEQ_I32_MAX: u64 = 2147483520; // bits as f32: 0x4eff_ffff +/// Least Exact Float (64 bits) greater-than-or-equal-to i32::MIN when rounding towards zero. +const LEF64_GEQ_I32_MIN: u64 = std::i32::MIN as u64; +/// Greatest Exact Float (64 bits) less-than-or-equal-to i32::MAX when rounding towards zero. +const GEF64_LEQ_I32_MAX: u64 = std::i32::MAX as u64; +/// Least Exact Float (32 bits) greater-than-or-equal-to u32::MIN when rounding towards zero. +const LEF32_GEQ_U32_MIN: u64 = std::u32::MIN as u64; +/// Greatest Exact Float (32 bits) less-than-or-equal-to u32::MAX when rounding towards zero. +const GEF32_LEQ_U32_MAX: u64 = 4294967040; // bits as f32: 0x4f7f_ffff +/// Least Exact Float (64 bits) greater-than-or-equal-to u32::MIN when rounding towards zero. +const LEF64_GEQ_U32_MIN: u64 = std::u32::MIN as u64; +/// Greatest Exact Float (64 bits) less-than-or-equal-to u32::MAX when rounding towards zero. +const GEF64_LEQ_U32_MAX: u64 = 4294967295; // bits as f64: 0x41ef_ffff_ffff_ffff +/// Least Exact Float (32 bits) greater-than-or-equal-to i64::MIN when rounding towards zero. +const LEF32_GEQ_I64_MIN: u64 = std::i64::MIN as u64; +/// Greatest Exact Float (32 bits) less-than-or-equal-to i64::MAX when rounding towards zero. +const GEF32_LEQ_I64_MAX: u64 = 9223371487098961920; // bits as f32: 0x5eff_ffff +/// Least Exact Float (64 bits) greater-than-or-equal-to i64::MIN when rounding towards zero. +const LEF64_GEQ_I64_MIN: u64 = std::i64::MIN as u64; +/// Greatest Exact Float (64 bits) less-than-or-equal-to i64::MAX when rounding towards zero. +const GEF64_LEQ_I64_MAX: u64 = 9223372036854774784; // bits as f64: 0x43df_ffff_ffff_ffff +/// Least Exact Float (32 bits) greater-than-or-equal-to u64::MIN when rounding towards zero. +const LEF32_GEQ_U64_MIN: u64 = std::u64::MIN; +/// Greatest Exact Float (32 bits) less-than-or-equal-to u64::MAX when rounding towards zero. +const GEF32_LEQ_U64_MAX: u64 = 18446742974197923840; // bits as f32: 0x5f7f_ffff +/// Least Exact Float (64 bits) greater-than-or-equal-to u64::MIN when rounding towards zero. +const LEF64_GEQ_U64_MIN: u64 = std::u64::MIN; +/// Greatest Exact Float (64 bits) less-than-or-equal-to u64::MAX when rounding towards zero. +const GEF64_LEQ_U64_MAX: u64 = 18446744073709549568; // bits as f64: 0x43ef_ffff_ffff_ffff From e738a9f2b5f54ae5cd7db38fc6ed437b4cc56a2f Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Fri, 20 Dec 2019 14:33:03 -0800 Subject: [PATCH 11/12] Name the magic constants in the singlepass backend. 
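These give names and doc comments to the float bounds used by the singlepass
conversion range checks, mirroring the constants named in the LLVM backend in
the previous commit. As a quick standalone sanity check (illustrative only,
not part of this patch), the new GEF32_LT_I32_MIN is one f32 ULP below -2^31:

    fn main() {
        // One ULP further from zero than -2^31: the greatest f32 strictly
        // below i32::MIN.
        let below_i32_min = f32::from_bits((std::i32::MIN as f32).to_bits() + 1);
        assert_eq!(below_i32_min, -2147483904.0); // value of GEF32_LT_I32_MIN
    }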
--- lib/singlepass-backend/src/codegen_x64.rs | 103 +++++++++++++++------- 1 file changed, 71 insertions(+), 32 deletions(-) diff --git a/lib/singlepass-backend/src/codegen_x64.rs b/lib/singlepass-backend/src/codegen_x64.rs index 797359bec93..c4da1fb7a6c 100644 --- a/lib/singlepass-backend/src/codegen_x64.rs +++ b/lib/singlepass-backend/src/codegen_x64.rs @@ -4713,8 +4713,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - -1.0, - 4294967296.0, + GEF32_LT_U32_MIN, + LEF32_GT_U32_MAX, ); a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); @@ -4749,8 +4749,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - -1.0, - 4294967296.0, + GEF32_LT_U32_MIN, + LEF32_GT_U32_MAX, |a, _m| { a.emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)); }, @@ -4824,8 +4824,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - -2147483904.0, - 2147483648.0, + GEF32_LT_I32_MIN, + LEF32_GT_I32_MAX, ); a.emit_cvttss2si_32(XMMOrMemory::XMM(tmp_in), tmp_out); @@ -4860,8 +4860,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - -2147483904.0, - 2147483648.0, + GEF32_LT_I32_MIN, + LEF32_GT_I32_MAX, |a, _m| { a.emit_mov( Size::S32, @@ -4941,8 +4941,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - -9223373136366403584.0, - 9223372036854775808.0, + GEF32_LT_I64_MIN, + LEF32_GT_I64_MAX, ); a.emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); a.emit_mov(Size::S64, Location::GPR(tmp_out), ret); @@ -4977,8 +4977,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - -9223373136366403584.0, - 9223372036854775808.0, + GEF32_LT_I64_MIN, + LEF32_GT_I64_MAX, |a, _m| { a.emit_mov( Size::S64, @@ -5058,8 +5058,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - -1.0, - 18446744073709551616.0, + GEF32_LT_U64_MIN, + LEF32_GT_U64_MAX, ); let tmp = self.machine.acquire_temp_gpr().unwrap(); // r15 @@ -5118,8 +5118,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - -1.0, - 18446744073709551616.0, + GEF32_LT_U64_MIN, + LEF32_GT_U64_MAX, |a, _m| { a.emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)); }, @@ -5218,8 +5218,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - -1.0, - 4294967296.0, + GEF64_LT_U32_MIN, + LEF64_GT_U32_MAX, ); a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); @@ -5255,8 +5255,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - -1.0, - 4294967296.0, + GEF64_LT_U32_MIN, + LEF64_GT_U32_MAX, |a, _m| { a.emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)); }, @@ -5335,8 +5335,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, real_in, - -2147483649.0, - 2147483648.0, + GEF64_LT_I32_MIN, + LEF64_GT_I32_MAX, ); a.emit_cvttsd2si_32(XMMOrMemory::XMM(real_in), tmp_out); @@ -5377,8 +5377,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, real_in, - -2147483649.0, - 2147483648.0, + GEF64_LT_I32_MIN, + LEF64_GT_I32_MAX, |a, _m| { a.emit_mov( Size::S32, @@ -5458,8 +5458,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - -9223372036854777856.0, - 9223372036854775808.0, + GEF64_LT_I64_MIN, + LEF64_GT_I64_MAX, ); a.emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); @@ -5495,8 +5495,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - 
-9223372036854777856.0, - 9223372036854775808.0, + GEF64_LT_I64_MIN, + LEF64_GT_I64_MAX, |a, _m| { a.emit_mov( Size::S64, @@ -5576,8 +5576,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - -1.0, - 18446744073709551616.0, + GEF64_LT_U64_MIN, + LEF64_GT_U64_MAX, ); let tmp = self.machine.acquire_temp_gpr().unwrap(); // r15 @@ -5637,8 +5637,8 @@ impl FunctionCodeGenerator for X64FunctionCode { a, &mut self.machine, tmp_in, - -1.0, - 18446744073709551616.0, + GEF64_LT_U64_MIN, + LEF64_GT_U64_MAX, |a, _m| { a.emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)); }, @@ -9642,3 +9642,42 @@ fn sort_call_movs(movs: &mut [(Location, GPR)]) { } */ } + +// Constants for the bounds of truncation operations. These are the least or +// greatest exact floats in either f32 or f64 representation less-than (for +// least) or greater-than (for greatest) the i32 or i64 or u32 or u64 +// min (for least) or max (for greatest), when rounding towards zero. + +/// Greatest Exact Float (32 bits) less-than i32::MIN when rounding towards zero. +const GEF32_LT_I32_MIN: f32 = -2147483904.0; +/// Least Exact Float (32 bits) greater-than i32::MAX when rounding towards zero. +const LEF32_GT_I32_MAX: f32 = 2147483648.0; +/// Greatest Exact Float (32 bits) less-than i64::MIN when rounding towards zero. +const GEF32_LT_I64_MIN: f32 = -9223373136366403584.0; +/// Least Exact Float (32 bits) greater-than i64::MAX when rounding towards zero. +const LEF32_GT_I64_MAX: f32 = 9223372036854775808.0; +/// Greatest Exact Float (32 bits) less-than u32::MIN when rounding towards zero. +const GEF32_LT_U32_MIN: f32 = -1.0; +/// Least Exact Float (32 bits) greater-than u32::MAX when rounding towards zero. +const LEF32_GT_U32_MAX: f32 = 4294967296.0; +/// Greatest Exact Float (32 bits) less-than u64::MIN when rounding towards zero. +const GEF32_LT_U64_MIN: f32 = -1.0; +/// Least Exact Float (32 bits) greater-than u64::MAX when rounding towards zero. +const LEF32_GT_U64_MAX: f32 = 18446744073709551616.0; + +/// Greatest Exact Float (64 bits) less-than i32::MIN when rounding towards zero. +const GEF64_LT_I32_MIN: f64 = -2147483649.0; +/// Least Exact Float (64 bits) greater-than i32::MAX when rounding towards zero. +const LEF64_GT_I32_MAX: f64 = 2147483648.0; +/// Greatest Exact Float (64 bits) less-than i64::MIN when rounding towards zero. +const GEF64_LT_I64_MIN: f64 = -9223372036854777856.0; +/// Least Exact Float (64 bits) greater-than i64::MAX when rounding towards zero. +const LEF64_GT_I64_MAX: f64 = 9223372036854775808.0; +/// Greatest Exact Float (64 bits) less-than u32::MIN when rounding towards zero. +const GEF64_LT_U32_MIN: f64 = -1.0; +/// Least Exact Float (64 bits) greater-than u32::MAX when rounding towards zero. +const LEF64_GT_U32_MAX: f64 = 4294967296.0; +/// Greatest Exact Float (64 bits) less-than u64::MIN when rounding towards zero. +const GEF64_LT_U64_MIN: f64 = -1.0; +/// Least Exact Float (64 bits) greater-than u64::MAX when rounding towards zero. +const LEF64_GT_U64_MAX: f64 = 18446744073709551616.0; From bba012915050a2172ddc591f90aa33c3e834d8cf Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Fri, 20 Dec 2019 14:51:46 -0800 Subject: [PATCH 12/12] Remove comments with register names that might not be right and don't really matter. 
--- lib/singlepass-backend/src/codegen_x64.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/singlepass-backend/src/codegen_x64.rs b/lib/singlepass-backend/src/codegen_x64.rs index c4da1fb7a6c..23a6a8e125d 100644 --- a/lib/singlepass-backend/src/codegen_x64.rs +++ b/lib/singlepass-backend/src/codegen_x64.rs @@ -5104,7 +5104,7 @@ impl FunctionCodeGenerator for X64FunctionCode { self.value_stack.push(ret); let tmp_out = self.machine.acquire_temp_gpr().unwrap(); - let tmp_in = self.machine.acquire_temp_xmm().unwrap(); // xmm2 + let tmp_in = self.machine.acquire_temp_xmm().unwrap(); Self::emit_relaxed_binop( a, @@ -5135,9 +5135,9 @@ impl FunctionCodeGenerator for X64FunctionCode { if a.arch_has_itruncf() { a.arch_emit_i64_trunc_uf32(tmp_in, tmp_out); } else { - let tmp = m.acquire_temp_gpr().unwrap(); // r15 - let tmp_x1 = m.acquire_temp_xmm().unwrap(); // xmm1 - let tmp_x2 = m.acquire_temp_xmm().unwrap(); // xmm3 + let tmp = m.acquire_temp_gpr().unwrap(); + let tmp_x1 = m.acquire_temp_xmm().unwrap(); + let tmp_x2 = m.acquire_temp_xmm().unwrap(); a.emit_mov( Size::S32, @@ -5623,7 +5623,7 @@ impl FunctionCodeGenerator for X64FunctionCode { self.value_stack.push(ret); let tmp_out = self.machine.acquire_temp_gpr().unwrap(); - let tmp_in = self.machine.acquire_temp_xmm().unwrap(); // xmm2 + let tmp_in = self.machine.acquire_temp_xmm().unwrap(); Self::emit_relaxed_binop( a, @@ -5654,9 +5654,9 @@ impl FunctionCodeGenerator for X64FunctionCode { if a.arch_has_itruncf() { a.arch_emit_i64_trunc_uf64(tmp_in, tmp_out); } else { - let tmp = m.acquire_temp_gpr().unwrap(); // r15 - let tmp_x1 = m.acquire_temp_xmm().unwrap(); // xmm1 - let tmp_x2 = m.acquire_temp_xmm().unwrap(); // xmm3 + let tmp = m.acquire_temp_gpr().unwrap(); + let tmp_x1 = m.acquire_temp_xmm().unwrap(); + let tmp_x2 = m.acquire_temp_xmm().unwrap(); a.emit_mov( Size::S64,