From 1bce92dd5d2d4645a9bbc0e4da98328d0eba5948 Mon Sep 17 00:00:00 2001
From: Alex Crichton
Date: Thu, 3 Jun 2021 17:21:18 -0700
Subject: [PATCH] wasm: Mark simd intrinsics as stable

This is a follow-up from rust-lang/rust#74372, which has finished FCP
for the stabilization of wasm intrinsics. This marks them all stable
as-is, and additionally marks the functions which create integer
vectors as `const`-stable as well. The only remaining unstable bits are
that `f32x4` and `f64x2` are `const`-unstable, mostly because I
couldn't figure out how to make them `const`-stable.
---
 crates/core_arch/src/lib.rs            |   2 +-
 crates/core_arch/src/mod.rs            |  20 +-
 crates/core_arch/src/wasm32/simd128.rs | 375 ++++++++++++++++++++++++-
 examples/hex.rs                        |   1 -
 4 files changed, 373 insertions(+), 25 deletions(-)

diff --git a/crates/core_arch/src/lib.rs b/crates/core_arch/src/lib.rs
index ddf012f635..da5b4713aa 100644
--- a/crates/core_arch/src/lib.rs
+++ b/crates/core_arch/src/lib.rs
@@ -39,7 +39,7 @@
     bench_black_box
 )]
 #![cfg_attr(test, feature(test, abi_vectorcall))]
-#![cfg_attr(all(test, target_arch = "wasm32"), feature(wasm_simd))]
+#![cfg_attr(target_arch = "wasm32", feature(wasm_simd_const))]
 #![deny(clippy::missing_inline_in_public_items)]
 #![allow(
     clippy::inline_always,
diff --git a/crates/core_arch/src/mod.rs b/crates/core_arch/src/mod.rs
index ede4e5a3fb..7812dc91f9 100644
--- a/crates/core_arch/src/mod.rs
+++ b/crates/core_arch/src/mod.rs
@@ -64,7 +64,7 @@ pub mod arch {
     /// proposals such as [atomics] and [simd].
     ///
     /// Intrinsics in the `wasm32` module are modeled after the WebAssembly
-    /// instructions that they represent. All functions are named after the
+    /// instructions that they represent. Most functions are named after the
     /// instruction they intend to correspond to, and the arguments/results
    /// correspond to the type signature of the instruction itself. Stable
     /// WebAssembly instructions are [documented online][instrdoc].
@@ -104,19 +104,11 @@ pub mod arch {
     ///
     /// ## SIMD
     ///
-    /// The [simd proposal][simd] for WebAssembly adds a new `v128` type for a
-    /// 128-bit SIMD register. It also adds a large array of instructions to
-    /// operate on the `v128` type to perform data processing. The SIMD proposal
-    /// at the time of this writing is in [phase 4] which means that it's in the
-    /// standardization phase. It's expected that once some testing on nightly
-    /// has happened a stabilization proposal will be made for the Rust
-    /// intrinsics. If you notice anything awry please feel free to [open an
-    /// issue](https://github.com/rust-lang/stdarch/issues/new).
-    ///
-    /// [phase 4]: https://github.com/webassembly/proposals
-    ///
-    /// Using SIMD is intended to be similar to as you would on `x86_64`, for
-    /// example. You'd write a function such as:
+    /// The [simd proposal][simd] for WebAssembly added a new `v128` type for a
+    /// 128-bit SIMD register. It also added a large array of instructions to
+    /// operate on the `v128` type to perform data processing. Using SIMD on
+    /// wasm is intended to be similar to using it on `x86_64`, for example.
+    /// You'd write a function such as:
     ///
     /// ```rust,ignore
     /// #[cfg(target_arch = "wasm32")]
diff --git a/crates/core_arch/src/wasm32/simd128.rs b/crates/core_arch/src/wasm32/simd128.rs
index 55ab0ad6a3..89930f94a9 100644
--- a/crates/core_arch/src/wasm32/simd128.rs
+++ b/crates/core_arch/src/wasm32/simd128.rs
@@ -3,7 +3,6 @@
 //! [WebAssembly `SIMD128` ISA]:
 //! https://github.com/WebAssembly/simd/blob/master/proposals/simd/SIMD.md
 
-#![unstable(feature = "wasm_simd", issue = "74372")]
 #![allow(non_camel_case_types)]
 #![allow(unused_imports)]
 
@@ -38,6 +37,7 @@ types! {
     /// type in WebAssembly. Operations on `v128` can only be performed with the
     /// functions in this module.
     // N.B., internals here are arbitrary.
+    #[stable(feature = "wasm_simd", since = "1.54.0")]
     pub struct v128(i32, i32, i32, i32);
 }
 
@@ -55,7 +55,11 @@ macro_rules! conversions {
         impl $ty {
             #[inline(always)]
             const fn v128(self) -> v128 {
-                unsafe { mem::transmute(self) }
+                union A {
+                    a: v128,
+                    b: $ty,
+                }
+                unsafe { A { b: self }.a }
             }
         }
     )*
@@ -263,6 +267,7 @@ impl<T: Copy> Clone for Unaligned<T> {
 #[cfg_attr(test, assert_instr(v128.load))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_load(m: *const v128) -> v128 {
     (*(m as *const Unaligned<v128>)).0
 }
@@ -272,6 +277,7 @@ pub unsafe fn v128_load(m: *const v128) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load8x8_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load8x8_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 {
     let m = *(m as *const Unaligned<simd::i8x8>);
     simd_cast::<_, simd::i16x8>(m.0).v128()
@@ -282,11 +288,13 @@ pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load8x8_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load8x8_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn i16x8_load_extend_u8x8(m: *const u8) -> v128 {
     let m = *(m as *const Unaligned<simd::u8x8>);
     simd_cast::<_, simd::u16x8>(m.0).v128()
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8;
 
 /// Load four 16-bit integers and sign extend each one to a 32-bit lane
@@ -294,6 +302,7 @@ pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8;
 #[cfg_attr(test, assert_instr(v128.load16x4_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load16x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 {
     let m = *(m as *const Unaligned<simd::i16x4>);
     simd_cast::<_, simd::i32x4>(m.0).v128()
@@ -304,11 +313,13 @@ pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load16x4_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load16x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn i32x4_load_extend_u16x4(m: *const u16) -> v128 {
     let m = *(m as *const Unaligned<simd::u16x4>);
     simd_cast::<_, simd::u32x4>(m.0).v128()
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4;
 
 /// Load two 32-bit integers and sign extend each one to a 64-bit lane
@@ -316,6 +327,7 @@ pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4;
 #[cfg_attr(test, assert_instr(v128.load32x2_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load32x2_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 {
     let m = *(m as *const Unaligned<simd::i32x2>);
     simd_cast::<_, simd::i64x2>(m.0).v128()
@@ -326,11 +338,13 @@ pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load32x2_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load32x2_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn i64x2_load_extend_u32x2(m: *const u32) -> v128 {
     let m = *(m as *const Unaligned<simd::u32x2>);
     simd_cast::<_, simd::u64x2>(m.0).v128()
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_load_extend_u32x2 as u64x2_load_extend_u32x2;
 
 /// Load a single element and splat to all lanes of a v128 vector.
@@ -338,6 +352,7 @@ pub use i64x2_load_extend_u32x2 as u64x2_load_extend_u32x2;
 #[cfg_attr(test, assert_instr(v128.load8_splat))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load8_splat"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_load8_splat(m: *const u8) -> v128 {
     simd::u8x16::splat(*m).v128()
 }
@@ -347,6 +362,7 @@ pub unsafe fn v128_load8_splat(m: *const u8) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load16_splat))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load16_splat"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_load16_splat(m: *const u16) -> v128 {
     let m = ptr::read_unaligned(m);
     simd::u16x8::splat(m).v128()
@@ -357,6 +373,7 @@ pub unsafe fn v128_load16_splat(m: *const u16) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load32_splat))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load32_splat"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_load32_splat(m: *const u32) -> v128 {
     let m = ptr::read_unaligned(m);
     simd::u32x4::splat(m).v128()
@@ -367,6 +384,7 @@ pub unsafe fn v128_load32_splat(m: *const u32) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load64_splat))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load64_splat"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_load64_splat(m: *const u64) -> v128 {
     let m = ptr::read_unaligned(m);
     simd::u64x2::splat(m).v128()
@@ -378,6 +396,7 @@ pub unsafe fn v128_load64_splat(m: *const u64) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load32_zero))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load32_zero"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_load32_zero(m: *const u32) -> v128 {
     llvm_load32_zero(m).v128()
 }
@@ -388,6 +407,7 @@ pub unsafe fn v128_load32_zero(m: *const u32) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load64_zero))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load64_zero"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_load64_zero(m: *const u64) -> v128 {
     llvm_load64_zero(m).v128()
 }
@@ -397,6 +417,7 @@ pub unsafe fn v128_load64_zero(m: *const u64) -> v128 {
 #[cfg_attr(test, assert_instr(v128.store))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.store"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_store(m: *mut v128, a: v128) {
     *(m as *mut Unaligned<v128>) = Unaligned(a);
 }
@@ -406,6 +427,7 @@ pub unsafe fn v128_store(m: *mut v128, a: v128) {
 #[cfg_attr(test, assert_instr(v128.load8_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load8_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 {
     static_assert!(L: usize where L < 16);
     llvm_load8_lane(m, v.as_u8x16(), L).v128()
@@ -416,6 +438,7 @@ pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load16_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load16_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 {
     static_assert!(L: usize where L < 8);
     llvm_load16_lane(m, v.as_u16x8(), L).v128()
@@ -426,6 +449,7 @@ pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load32_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load32_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 {
     static_assert!(L: usize where L < 4);
     llvm_load32_lane(m, v.as_u32x4(), L).v128()
@@ -436,6 +460,7 @@ pub unsafe fn v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 {
 #[cfg_attr(test, assert_instr(v128.load64_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.load64_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128 {
     static_assert!(L: usize where L < 2);
     llvm_load64_lane(m, v.as_u64x2(), L).v128()
@@ -446,6 +471,7 @@ pub unsafe fn v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128 {
 #[cfg_attr(test, assert_instr(v128.store8_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.store8_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_store8_lane<const L: usize>(v: v128, m: *mut u8) {
     static_assert!(L: usize where L < 16);
     llvm_store8_lane(m, v.as_u8x16(), L);
@@ -456,6 +482,7 @@ pub unsafe fn v128_store8_lane<const L: usize>(v: v128, m: *mut u8) {
 #[cfg_attr(test, assert_instr(v128.store16_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.store16_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_store16_lane<const L: usize>(v: v128, m: *mut u16) {
     static_assert!(L: usize where L < 8);
     llvm_store16_lane(m, v.as_u16x8(), L)
@@ -466,6 +493,7 @@ pub unsafe fn v128_store16_lane<const L: usize>(v: v128, m: *mut u16) {
 #[cfg_attr(test, assert_instr(v128.store32_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.store32_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_store32_lane<const L: usize>(v: v128, m: *mut u32) {
     static_assert!(L: usize where L < 4);
     llvm_store32_lane(m, v.as_u32x4(), L)
@@ -476,6 +504,7 @@ pub unsafe fn v128_store32_lane<const L: usize>(v: v128, m: *mut u32) {
 #[cfg_attr(test, assert_instr(v128.store64_lane, L = 0))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.store64_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_store64_lane<const L: usize>(v: v128, m: *mut u64) {
     static_assert!(L: usize where L < 2);
     llvm_store64_lane(m, v.as_u64x2(), L)
 }
@@ -510,6 +539,8 @@ pub unsafe fn v128_store64_lane<const L: usize>(v: v128, m: *mut u64) {
     )
 )]
 #[doc(alias("v128.const"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
+#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
 pub const fn i8x16(
     a0: i8,
     a1: i8,
@@ -528,10 +559,24 @@ pub const fn i8x16(
     a14: i8,
     a15: i8,
 ) -> v128 {
-    simd::i8x16(
-        a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
+    v128(
+        (a0 as u8 as i32)
+            | ((a1 as u8 as i32) << 8)
+            | ((a2 as u8 as i32) << 16)
+            | ((a3 as u8 as i32) << 24),
+        (a4 as u8 as i32)
+            | ((a5 as u8 as i32) << 8)
+            | ((a6 as u8 as i32) << 16)
+            | ((a7 as u8 as i32) << 24),
+        (a8 as u8 as i32)
+            | ((a9 as u8 as i32) << 8)
+            | ((a10 as u8 as i32) << 16)
+            | ((a11 as u8 as i32) << 24),
+        (a12 as u8 as i32)
+            | ((a13 as u8 as i32) << 8)
+            | ((a14 as u8 as i32) << 16)
+            | ((a15 as u8 as i32) << 24),
     )
-    .v128()
 }
 
 /// Materializes a SIMD value from the provided operands.
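
The hand-rolled packing in the new `const` body of `i8x16` is just the vector's little-endian byte layout written out with shifts and ors. A minimal standalone sketch of the same idea (the check assumes a little-endian host; WebAssembly itself is always little-endian):

```rust
// Pack 16 bytes into four i32 lanes, least-significant byte first,
// mirroring what the const fn above does.
fn pack(bytes: [i8; 16]) -> [i32; 4] {
    let mut out = [0i32; 4];
    let mut i = 0;
    while i < 4 {
        out[i] = (bytes[4 * i] as u8 as i32)
            | ((bytes[4 * i + 1] as u8 as i32) << 8)
            | ((bytes[4 * i + 2] as u8 as i32) << 16)
            | ((bytes[4 * i + 3] as u8 as i32) << 24);
        i += 1;
    }
    out
}

fn main() {
    let bytes: [i8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
    // On a little-endian target this agrees with a byte-for-byte transmute.
    let transmuted = unsafe { core::mem::transmute::<[i8; 16], [i32; 4]>(bytes) };
    assert_eq!(pack(bytes), transmuted);
}
```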
@@ -541,6 +586,8 @@ pub const fn i8x16( #[inline] #[target_feature(enable = "simd128")] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn u8x16( a0: u8, a1: u8, @@ -586,8 +633,15 @@ pub const fn u8x16( ) )] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn i16x8(a0: i16, a1: i16, a2: i16, a3: i16, a4: i16, a5: i16, a6: i16, a7: i16) -> v128 { - simd::i16x8(a0, a1, a2, a3, a4, a5, a6, a7).v128() + v128( + (a0 as u16 as i32) | ((a1 as i32) << 16), + (a2 as u16 as i32) | ((a3 as i32) << 16), + (a4 as u16 as i32) | ((a5 as i32) << 16), + (a6 as u16 as i32) | ((a7 as i32) << 16), + ) } /// Materializes a SIMD value from the provided operands. @@ -597,6 +651,8 @@ pub const fn i16x8(a0: i16, a1: i16, a2: i16, a3: i16, a4: i16, a5: i16, a6: i16 #[inline] #[target_feature(enable = "simd128")] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn u16x8(a0: u16, a1: u16, a2: u16, a3: u16, a4: u16, a5: u16, a6: u16, a7: u16) -> v128 { i16x8( a0 as i16, a1 as i16, a2 as i16, a3 as i16, a4 as i16, a5 as i16, a6 as i16, a7 as i16, @@ -611,8 +667,10 @@ pub const fn u16x8(a0: u16, a1: u16, a2: u16, a3: u16, a4: u16, a5: u16, a6: u16 #[target_feature(enable = "simd128")] #[cfg_attr(test, assert_instr(v128.const, a0 = 0, a1 = 1, a2 = 2, a3 = 3))] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 { - simd::i32x4(a0, a1, a2, a3).v128() + v128(a0, a1, a2, a3) } /// Materializes a SIMD value from the provided operands. @@ -622,6 +680,8 @@ pub const fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 { i32x4(a0 as i32, a1 as i32, a2 as i32, a3 as i32) } @@ -632,10 +692,12 @@ pub const fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 { /// be lowered to a sequence of instructions to materialize the vector value. #[inline] #[target_feature(enable = "simd128")] -#[cfg_attr(test, assert_instr(v128.const, a0 = 0, a1 = 1))] +#[cfg_attr(test, assert_instr(v128.const, a0 = 1, a1 = 2))] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn i64x2(a0: i64, a1: i64) -> v128 { - simd::i64x2(a0, a1).v128() + v128(a0 as i32, (a0 >> 32) as i32, a1 as i32, (a1 >> 32) as i32) } /// Materializes a SIMD value from the provided operands. 
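
What the `rustc_const_stable` attributes above buy users: the integer constructors can now build vector values at compile time. A hypothetical sketch of the resulting usage (illustrative constant names, compiled for a `wasm32` target via the stabilized `std::arch::wasm32` module):

```rust
use std::arch::wasm32::{i16x8, i32x4, v128};

// Both constants are evaluated at compile time; nothing is constructed at runtime.
const LANE_INDICES: v128 = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
const SIGN_BITS: v128 = i32x4(i32::MIN, i32::MIN, i32::MIN, i32::MIN);
```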
@@ -645,6 +707,8 @@ pub const fn i64x2(a0: i64, a1: i64) -> v128 {
 #[inline]
 #[target_feature(enable = "simd128")]
 #[doc(alias("v128.const"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
+#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
 pub const fn u64x2(a0: u64, a1: u64) -> v128 {
     i64x2(a0 as i64, a1 as i64)
 }
@@ -657,6 +721,8 @@ pub const fn u64x2(a0: u64, a1: u64) -> v128 {
 #[target_feature(enable = "simd128")]
 #[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0, a2 = 2.0, a3 = 3.0))]
 #[doc(alias("v128.const"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
+#[rustc_const_unstable(feature = "wasm_simd_const", issue = "73288")]
 pub const fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 {
     simd::f32x4(a0, a1, a2, a3).v128()
 }
@@ -669,6 +735,8 @@ pub const fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 {
 #[target_feature(enable = "simd128")]
 #[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0))]
 #[doc(alias("v128.const"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
+#[rustc_const_unstable(feature = "wasm_simd_const", issue = "73288")]
 pub const fn f64x2(a0: f64, a1: f64) -> v128 {
     simd::f64x2(a0, a1).v128()
 }
@@ -711,6 +779,7 @@ pub const fn f64x2(a0: f64, a1: f64) -> v128 {
 )]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i8x16.shuffle"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i8x16_shuffle<
     const I0: usize,
     const I1: usize,
@@ -779,6 +848,7 @@ pub fn i8x16_shuffle<
     shuf.v128()
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i8x16_shuffle as u8x16_shuffle;
 
 /// Same as [`i8x16_shuffle`], except operates as if the inputs were eight
@@ -804,6 +874,7 @@ pub use i8x16_shuffle as u8x16_shuffle;
 )]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i8x16.shuffle"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_shuffle<
     const I0: usize,
     const I1: usize,
@@ -846,6 +917,7 @@ pub fn i16x8_shuffle<
     shuf.v128()
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_shuffle as u16x8_shuffle;
 
 /// Same as [`i8x16_shuffle`], except operates as if the inputs were four
@@ -859,6 +931,7 @@ pub use i16x8_shuffle as u16x8_shuffle;
 #[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2, I2 = 4, I3 = 6))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i8x16.shuffle"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>(
     a: v128,
     b: v128,
@@ -877,6 +950,7 @@ pub fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>(
     static_assert!(I0: usize where I0 < 4);
     static_assert!(I1: usize where I1 < 4);
@@ -903,6 +978,7 @@ pub fn i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128) -> v128
     shuf.v128()
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_shuffle as u64x2_shuffle;
 
 /// Extracts a lane from a 128-bit vector interpreted as 16 packed i8 numbers.
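
For the shuffles stabilized above, the lane indices are const generics: values 0..16 select lanes from `a` and 16..32 select lanes from `b`. A usage sketch (hypothetical helper, compiled for a `wasm32` target):

```rust
use std::arch::wasm32::{i8x16_shuffle, v128};

// Interleave the low 8 bytes of `a` and `b` (a.0, b.0, a.1, b.1, ...),
// similar in spirit to x86's punpcklbw.
#[target_feature(enable = "simd128")]
fn interleave_low_bytes(a: v128, b: v128) -> v128 {
    i8x16_shuffle::<0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23>(a, b)
}
```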
@@ -913,6 +989,7 @@ pub use i64x2_shuffle as u64x2_shuffle;
 #[cfg_attr(test, assert_instr(i8x16.extract_lane_s, N = 3))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i8x16.extract_lane_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 {
     static_assert!(N: usize where N < 16);
     unsafe { simd_extract(a.as_i8x16(), N as u32) }
@@ -926,6 +1003,7 @@ pub fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 {
 #[cfg_attr(test, assert_instr(i8x16.extract_lane_u, N = 3))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i8x16.extract_lane_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 {
     static_assert!(N: usize where N < 16);
     unsafe { simd_extract(a.as_u8x16(), N as u32) }
@@ -939,6 +1017,7 @@ pub fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 {
 #[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i8x16.replace_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 {
     static_assert!(N: usize where N < 16);
     unsafe { simd_insert(a.as_i8x16(), N as u32, val).v128() }
@@ -952,6 +1031,7 @@ pub fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 {
 #[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i8x16.replace_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 {
     static_assert!(N: usize where N < 16);
     unsafe { simd_insert(a.as_u8x16(), N as u32, val).v128() }
@@ -965,6 +1045,7 @@ pub fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.extract_lane_s, N = 2))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extract_lane_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 {
     static_assert!(N: usize where N < 8);
     unsafe { simd_extract(a.as_i16x8(), N as u32) }
@@ -978,6 +1059,7 @@ pub fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 {
 #[cfg_attr(test, assert_instr(i16x8.extract_lane_u, N = 2))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extract_lane_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 {
     static_assert!(N: usize where N < 8);
     unsafe { simd_extract(a.as_u16x8(), N as u32) }
@@ -991,6 +1073,7 @@ pub fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 {
 #[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.replace_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 {
     static_assert!(N: usize where N < 8);
     unsafe { simd_insert(a.as_i16x8(), N as u32, val).v128() }
@@ -1004,6 +1087,7 @@ pub fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.replace_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 {
     static_assert!(N: usize where N < 8);
     unsafe { simd_insert(a.as_u16x8(), N as u32, val).v128() }
@@ -1017,6 +1101,7 @@ pub fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.extract_lane, N = 2))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extract_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 {
     static_assert!(N: usize where N < 4);
     unsafe { simd_extract(a.as_i32x4(), N as u32) }
@@ -1029,6 +1114,7 @@ pub fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 {
 #[inline]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extract_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 {
     i32x4_extract_lane::<N>(a) as u32
 }
@@ -1041,6 +1127,7 @@ pub fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 {
 #[cfg_attr(test, assert_instr(i32x4.replace_lane, N = 2))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.replace_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 {
     static_assert!(N: usize where N < 4);
     unsafe { simd_insert(a.as_i32x4(), N as u32, val).v128() }
@@ -1053,6 +1140,7 @@ pub fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 {
 #[inline]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.replace_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 {
     i32x4_replace_lane::<N>(a, val as i32)
 }
@@ -1065,6 +1153,7 @@ pub fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i64x2.extract_lane, N = 1))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extract_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 {
     static_assert!(N: usize where N < 2);
     unsafe { simd_extract(a.as_i64x2(), N as u32) }
@@ -1077,6 +1166,7 @@ pub fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 {
 #[inline]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extract_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 {
     i64x2_extract_lane::<N>(a) as u64
 }
@@ -1089,6 +1179,7 @@ pub fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 {
 #[cfg_attr(test, assert_instr(i64x2.replace_lane, N = 0))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.replace_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 {
     static_assert!(N: usize where N < 2);
     unsafe { simd_insert(a.as_i64x2(), N as u32, val).v128() }
@@ -1101,6 +1192,7 @@ pub fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 {
 #[inline]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.replace_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 {
     i64x2_replace_lane::<N>(a, val as i64)
 }
@@ -1113,6 +1205,7 @@ pub fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.extract_lane, N = 1))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.extract_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 {
     static_assert!(N: usize where N < 4);
     unsafe { simd_extract(a.as_f32x4(), N as u32) }
@@ -1126,6 +1219,7 @@ pub fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 {
 #[cfg_attr(test, assert_instr(f32x4.replace_lane, N = 1))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.replace_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 {
     static_assert!(N: usize where N < 4);
     unsafe { simd_insert(a.as_f32x4(), N as u32, val).v128() }
@@ -1139,6 +1233,7 @@ pub fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.extract_lane, N = 1))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.extract_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 {
     static_assert!(N: usize where N < 2);
     unsafe { simd_extract(a.as_f64x2(), N as u32) }
@@ -1152,6 +1247,7 @@ pub fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 {
 #[cfg_attr(test, assert_instr(f64x2.replace_lane, N = 1))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.replace_lane"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 {
     static_assert!(N: usize where N < 2);
     unsafe { simd_insert(a.as_f64x2(), N as u32, val).v128() }
@@ -1166,10 +1262,12 @@ pub fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 {
 #[cfg_attr(test, assert_instr(i8x16.swizzle))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i8x16.swizzle"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i8x16_swizzle(a: v128, s: v128) -> v128 {
     unsafe { llvm_swizzle(a.as_i8x16(), s.as_i8x16()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i8x16_swizzle as u8x16_swizzle;
 
 /// Creates a vector with identical lanes.
@@ -1179,6 +1277,7 @@ pub use i8x16_swizzle as u8x16_swizzle;
 #[cfg_attr(test, assert_instr(i8x16.splat))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i8x16.splat"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i8x16_splat(a: i8) -> v128 {
     simd::i8x16::splat(a).v128()
 }
@@ -1190,6 +1289,7 @@ pub fn i8x16_splat(a: i8) -> v128 {
 #[cfg_attr(test, assert_instr(i8x16.splat))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i8x16.splat"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u8x16_splat(a: u8) -> v128 {
     simd::u8x16::splat(a).v128()
 }
@@ -1201,6 +1301,7 @@ pub fn u8x16_splat(a: u8) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.splat))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.splat"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_splat(a: i16) -> v128 {
     simd::i16x8::splat(a).v128()
 }
@@ -1212,6 +1313,7 @@ pub fn i16x8_splat(a: i16) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.splat))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.splat"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_splat(a: u16) -> v128 {
     simd::u16x8::splat(a).v128()
 }
@@ -1223,6 +1325,7 @@ pub fn u16x8_splat(a: u16) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.splat))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.splat"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_splat(a: i32) -> v128 {
     simd::i32x4::splat(a).v128()
 }
@@ -1233,6 +1336,7 @@ pub fn i32x4_splat(a: i32) -> v128 {
 #[inline]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.splat"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_splat(a: u32) -> v128 {
     i32x4_splat(a as i32)
 }
@@ -1244,6 +1348,7 @@ pub fn u32x4_splat(a: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i64x2.splat))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.splat"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_splat(a: i64) -> v128 {
     simd::i64x2::splat(a).v128()
 }
@@ -1254,6 +1359,7 @@ pub fn i64x2_splat(a: i64) -> v128 {
 #[inline]
 #[target_feature(enable = "simd128")]
 #[doc(alias("u64x2.splat"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u64x2_splat(a: u64) -> v128 {
     i64x2_splat(a as i64)
 }
@@ -1265,6 +1371,7 @@ pub fn u64x2_splat(a: u64) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.splat))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.splat"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_splat(a: f32) -> v128 {
     simd::f32x4::splat(a).v128()
 }
@@ -1276,6 +1383,7 @@ pub fn f32x4_splat(a: f32) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.splat))]
#[target_feature(enable = "simd128")] #[doc(alias("f64x2.splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_splat(a: f64) -> v128 { simd::f64x2::splat(a).v128() } @@ -1289,6 +1397,7 @@ pub fn f64x2_splat(a: f64) -> v128 { #[cfg_attr(test, assert_instr(i8x16.eq))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.eq"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_eq(a: v128, b: v128) -> v128 { unsafe { simd_eq::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -1302,11 +1411,14 @@ pub fn i8x16_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.ne))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.ne"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_ne(a: v128, b: v128) -> v128 { unsafe { simd_ne::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_eq as u8x16_eq; +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_ne as u8x16_ne; /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit @@ -1318,6 +1430,7 @@ pub use i8x16_ne as u8x16_ne; #[cfg_attr(test, assert_instr(i8x16.lt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.lt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -1331,6 +1444,7 @@ pub fn i8x16_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.lt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.lt_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() } } @@ -1344,6 +1458,7 @@ pub fn u8x16_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.gt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.gt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -1357,6 +1472,7 @@ pub fn i8x16_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.gt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.gt_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() } } @@ -1370,6 +1486,7 @@ pub fn u8x16_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.le_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.le_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -1383,6 +1500,7 @@ pub fn i8x16_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.le_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.le_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() } } @@ -1396,6 +1514,7 @@ pub fn u8x16_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.ge_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.ge_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -1409,6 +1528,7 @@ pub fn 
i8x16_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.ge_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.ge_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() } } @@ -1422,6 +1542,7 @@ pub fn u8x16_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.eq))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.eq"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_eq(a: v128, b: v128) -> v128 { unsafe { simd_eq::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -1435,11 +1556,14 @@ pub fn i16x8_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.ne))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.ne"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_ne(a: v128, b: v128) -> v128 { unsafe { simd_ne::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i16x8_eq as u16x8_eq; +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i16x8_ne as u16x8_ne; /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit @@ -1451,6 +1575,7 @@ pub use i16x8_ne as u16x8_ne; #[cfg_attr(test, assert_instr(i16x8.lt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.lt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -1464,6 +1589,7 @@ pub fn i16x8_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.lt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.lt_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u16x8_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() } } @@ -1477,6 +1603,7 @@ pub fn u16x8_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.gt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.gt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -1490,6 +1617,7 @@ pub fn i16x8_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.gt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.gt_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u16x8_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() } } @@ -1503,6 +1631,7 @@ pub fn u16x8_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.le_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.le_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -1516,6 +1645,7 @@ pub fn i16x8_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.le_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.le_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u16x8_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() } } @@ -1529,6 +1659,7 @@ pub fn u16x8_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.ge_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.ge_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_ge(a: 
v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -1542,6 +1673,7 @@ pub fn i16x8_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.ge_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.ge_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u16x8_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() } } @@ -1555,6 +1687,7 @@ pub fn u16x8_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.eq))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.eq"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_eq(a: v128, b: v128) -> v128 { unsafe { simd_eq::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } @@ -1568,11 +1701,14 @@ pub fn i32x4_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.ne))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.ne"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_ne(a: v128, b: v128) -> v128 { unsafe { simd_ne::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i32x4_eq as u32x4_eq; +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i32x4_ne as u32x4_ne; /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1584,6 +1720,7 @@ pub use i32x4_ne as u32x4_ne; #[cfg_attr(test, assert_instr(i32x4.lt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.lt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } @@ -1597,6 +1734,7 @@ pub fn i32x4_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.lt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.lt_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u32x4_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() } } @@ -1610,6 +1748,7 @@ pub fn u32x4_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.gt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.gt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } @@ -1623,6 +1762,7 @@ pub fn i32x4_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.gt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.gt_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u32x4_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() } } @@ -1636,6 +1776,7 @@ pub fn u32x4_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.le_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.le_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } @@ -1649,6 +1790,7 @@ pub fn i32x4_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.le_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.le_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u32x4_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() } } @@ -1662,6 +1804,7 @@ pub fn u32x4_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.ge_s))] 
#[target_feature(enable = "simd128")] #[doc(alias("i32x4.ge_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } @@ -1675,6 +1818,7 @@ pub fn i32x4_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.ge_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.ge_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u32x4_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() } } @@ -1688,6 +1832,7 @@ pub fn u32x4_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.eq))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.eq"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_eq(a: v128, b: v128) -> v128 { unsafe { simd_eq::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1701,11 +1846,14 @@ pub fn i64x2_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.ne))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.ne"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_ne(a: v128, b: v128) -> v128 { unsafe { simd_ne::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i64x2_eq as u64x2_eq; +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i64x2_ne as u64x2_ne; /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit @@ -1717,6 +1865,7 @@ pub use i64x2_ne as u64x2_ne; #[cfg_attr(test, assert_instr(i64x2.lt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.lt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1730,6 +1879,7 @@ pub fn i64x2_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.gt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.gt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1743,6 +1893,7 @@ pub fn i64x2_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.le_s))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.le_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1756,6 +1907,7 @@ pub fn i64x2_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.ge_s))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.ge_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1769,6 +1921,7 @@ pub fn i64x2_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.eq))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.eq"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_eq(a: v128, b: v128) -> v128 { unsafe { simd_eq::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } @@ -1782,6 +1935,7 @@ pub fn f32x4_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.ne))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.ne"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_ne(a: v128, b: v128) -> v128 { unsafe { simd_ne::<_, simd::i32x4>(a.as_f32x4(), 
b.as_f32x4()).v128() } } @@ -1795,6 +1949,7 @@ pub fn f32x4_ne(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.lt))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.lt"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } @@ -1808,6 +1963,7 @@ pub fn f32x4_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.gt))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.gt"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } @@ -1821,6 +1977,7 @@ pub fn f32x4_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.le))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.le"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } @@ -1834,6 +1991,7 @@ pub fn f32x4_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.ge))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.ge"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } @@ -1847,6 +2005,7 @@ pub fn f32x4_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.eq))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.eq"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_eq(a: v128, b: v128) -> v128 { unsafe { simd_eq::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -1860,6 +2019,7 @@ pub fn f64x2_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.ne))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.ne"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_ne(a: v128, b: v128) -> v128 { unsafe { simd_ne::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -1873,6 +2033,7 @@ pub fn f64x2_ne(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.lt))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.lt"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -1886,6 +2047,7 @@ pub fn f64x2_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.gt))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.gt"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -1899,6 +2061,7 @@ pub fn f64x2_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.le))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.le"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -1912,6 +2075,7 @@ pub fn f64x2_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.ge))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.ge"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -1921,6 +2085,7 @@ pub fn f64x2_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.not))] 
#[target_feature(enable = "simd128")] #[doc(alias("v128.not"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_not(a: v128) -> v128 { unsafe { simd_xor(a.as_i64x2(), simd::i64x2(!0, !0)).v128() } } @@ -1931,6 +2096,7 @@ pub fn v128_not(a: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.and))] #[target_feature(enable = "simd128")] #[doc(alias("v128.and"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_and(a: v128, b: v128) -> v128 { unsafe { simd_and(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1942,6 +2108,7 @@ pub fn v128_and(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.andnot))] #[target_feature(enable = "simd128")] #[doc(alias("v128.andnot"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_andnot(a: v128, b: v128) -> v128 { unsafe { simd_and(a.as_i64x2(), simd_xor(b.as_i64x2(), simd::i64x2(-1, -1))).v128() } } @@ -1952,6 +2119,7 @@ pub fn v128_andnot(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.or))] #[target_feature(enable = "simd128")] #[doc(alias("v128.or"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_or(a: v128, b: v128) -> v128 { unsafe { simd_or(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1962,6 +2130,7 @@ pub fn v128_or(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.xor))] #[target_feature(enable = "simd128")] #[doc(alias("v128.xor"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_xor(a: v128, b: v128) -> v128 { unsafe { simd_xor(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1971,6 +2140,7 @@ pub fn v128_xor(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.bitselect))] #[target_feature(enable = "simd128")] #[doc(alias("v128.bitselect"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 { unsafe { llvm_bitselect(v1.as_i8x16(), v2.as_i8x16(), c.as_i8x16()).v128() } } @@ -1980,6 +2150,7 @@ pub fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.any_true))] #[target_feature(enable = "simd128")] #[doc(alias("v128.any_true"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_any_true(a: v128) -> bool { unsafe { llvm_any_true_i8x16(a.as_i8x16()) != 0 } } @@ -1989,6 +2160,7 @@ pub fn v128_any_true(a: v128) -> bool { #[cfg_attr(test, assert_instr(i8x16.abs))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.abs"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_abs(a: v128) -> v128 { unsafe { let a = a.as_i8x16(); @@ -2002,6 +2174,7 @@ pub fn i8x16_abs(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.neg))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.neg"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_neg(a: v128) -> v128 { unsafe { simd_mul(a.as_i8x16(), simd::i8x16::splat(-1)).v128() } } @@ -2011,10 +2184,12 @@ pub fn i8x16_neg(a: v128) -> v128 { // #[cfg_attr(test, assert_instr(i8x16.popcnt))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i8x16.popcnt"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_popcnt(v: v128) -> v128 { unsafe { llvm_popcnt(v.as_i8x16()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_popcnt as u8x16_popcnt; /// Returns true if all lanes are nonzero or false if any lane is nonzero. 
@@ -2022,10 +2197,12 @@ pub use i8x16_popcnt as u8x16_popcnt; #[cfg_attr(test, assert_instr(i8x16.all_true))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.all_true"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_all_true(a: v128) -> bool { unsafe { llvm_i8x16_all_true(a.as_i8x16()) != 0 } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_all_true as u8x16_all_true; /// Extracts the high bit for each lane in `a` and produce a scalar mask with @@ -2034,6 +2211,7 @@ pub use i8x16_all_true as u8x16_all_true; #[cfg_attr(test, assert_instr(i8x16.bitmask))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.bitmask"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_bitmask(a: v128) -> u16 { // FIXME(https://bugs.llvm.org/show_bug.cgi?id=50507) - this produces an // extraneous `i32.and` instruction against a mask of 65535 when converting @@ -2042,6 +2220,7 @@ pub fn i8x16_bitmask(a: v128) -> u16 { unsafe { llvm_bitmask_i8x16(a.as_i8x16()) as u16 } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_bitmask as u8x16_bitmask; /// Converts two input vectors into a smaller lane vector by narrowing each @@ -2053,6 +2232,7 @@ pub use i8x16_bitmask as u8x16_bitmask; #[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.narrow_i16x8_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_narrow_i16x8(a: v128, b: v128) -> v128 { unsafe { llvm_narrow_i8x16_s(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -2066,6 +2246,7 @@ pub fn i8x16_narrow_i16x8(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.narrow_i16x8_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 { unsafe { llvm_narrow_i8x16_u(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -2078,10 +2259,12 @@ pub fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.shl))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shl"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_shl(a: v128, amt: u32) -> v128 { unsafe { simd_shl(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_shl as u8x16_shl; /// Shifts each lane to the right by the specified number of bits, sign @@ -2093,6 +2276,7 @@ pub use i8x16_shl as u8x16_shl; #[cfg_attr(test, assert_instr(i8x16.shr_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shr_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_shr(a: v128, amt: u32) -> v128 { unsafe { simd_shr(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() } } @@ -2106,6 +2290,7 @@ pub fn i8x16_shr(a: v128, amt: u32) -> v128 { #[cfg_attr(test, assert_instr(i8x16.shr_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shr_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_shr(a: v128, amt: u32) -> v128 { unsafe { simd_shr(a.as_u8x16(), simd::u8x16::splat(amt as u8)).v128() } } @@ -2115,10 +2300,12 @@ pub fn u8x16_shr(a: v128, amt: u32) -> v128 { #[cfg_attr(test, assert_instr(i8x16.add))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.add"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_add(a: v128, b: v128) -> v128 { unsafe { simd_add(a.as_i8x16(), b.as_i8x16()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_add as 
u8x16_add; /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit signed @@ -2127,6 +2314,7 @@ pub use i8x16_add as u8x16_add; #[cfg_attr(test, assert_instr(i8x16.add_sat_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.add_sat_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_add_sat(a: v128, b: v128) -> v128 { unsafe { llvm_i8x16_add_sat_s(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -2137,6 +2325,7 @@ pub fn i8x16_add_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.add_sat_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.add_sat_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_add_sat(a: v128, b: v128) -> v128 { unsafe { llvm_i8x16_add_sat_u(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -2146,10 +2335,12 @@ pub fn u8x16_add_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.sub))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.sub"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_sub(a: v128, b: v128) -> v128 { unsafe { simd_sub(a.as_i8x16(), b.as_i8x16()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_sub as u8x16_sub; /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit @@ -2158,6 +2349,7 @@ pub use i8x16_sub as u8x16_sub; #[cfg_attr(test, assert_instr(i8x16.sub_sat_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.sub_sat_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_sub_sat(a: v128, b: v128) -> v128 { unsafe { llvm_i8x16_sub_sat_s(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -2168,6 +2360,7 @@ pub fn i8x16_sub_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.sub_sat_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.sub_sat_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_sub_sat(a: v128, b: v128) -> v128 { unsafe { llvm_i8x16_sub_sat_u(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -2178,6 +2371,7 @@ pub fn u8x16_sub_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.min_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.min_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_min(a: v128, b: v128) -> v128 { let a = a.as_i8x16(); let b = b.as_i8x16(); @@ -2190,6 +2384,7 @@ pub fn i8x16_min(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.min_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.min_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_min(a: v128, b: v128) -> v128 { let a = a.as_u8x16(); let b = b.as_u8x16(); @@ -2202,6 +2397,7 @@ pub fn u8x16_min(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.max_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.max_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_max(a: v128, b: v128) -> v128 { let a = a.as_i8x16(); let b = b.as_i8x16(); @@ -2214,6 +2410,7 @@ pub fn i8x16_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.max_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.max_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_max(a: v128, b: v128) -> v128 { let a = a.as_u8x16(); let b = b.as_u8x16(); @@ -2225,6 +2422,7 @@ pub fn u8x16_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.avgr_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.avgr_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_avgr(a: v128, b: v128) -> v128 { 
@@ -2235,6 +2433,7 @@ pub fn u8x16_avgr(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extadd_pairwise_i8x16_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extadd_pairwise_i8x16(a: v128) -> v128 {
     unsafe { llvm_i16x8_extadd_pairwise_i8x16_s(a.as_i8x16()).v128() }
 }
@@ -2245,10 +2444,12 @@ pub fn i16x8_extadd_pairwise_i8x16(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extadd_pairwise_i8x16_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extadd_pairwise_u8x16(a: v128) -> v128 {
     unsafe { llvm_i16x8_extadd_pairwise_i8x16_u(a.as_i8x16()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_extadd_pairwise_u8x16 as u16x8_extadd_pairwise_u8x16;
 
 /// Lane-wise wrapping absolute value.
@@ -2256,6 +2457,7 @@ pub use i16x8_extadd_pairwise_u8x16 as u16x8_extadd_pairwise_u8x16;
 #[cfg_attr(test, assert_instr(i16x8.abs))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.abs"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_abs(a: v128) -> v128 {
     let a = a.as_i16x8();
     let zero = simd::i16x8::splat(0);
@@ -2269,6 +2471,7 @@ pub fn i16x8_abs(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.neg))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.neg"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_neg(a: v128) -> v128 {
     unsafe { simd_mul(a.as_i16x8(), simd::i16x8::splat(-1)).v128() }
 }
@@ -2278,6 +2481,7 @@ pub fn i16x8_neg(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i16x8.q15mulr_sat_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.q15mulr_sat_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_q15mulr_sat(a: v128, b: v128) -> v128 {
     unsafe { llvm_q15mulr(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2287,10 +2491,12 @@ pub fn i16x8_q15mulr_sat(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.all_true))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.all_true"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_all_true(a: v128) -> bool {
     unsafe { llvm_i16x8_all_true(a.as_i16x8()) != 0 }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_all_true as u16x8_all_true;
 
 /// Extracts the high bit for each lane in `a` and produces a scalar mask with
@@ -2299,10 +2505,12 @@ pub use i16x8_all_true as u16x8_all_true;
 #[cfg_attr(test, assert_instr(i16x8.bitmask))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.bitmask"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_bitmask(a: v128) -> u8 {
     unsafe { llvm_bitmask_i16x8(a.as_i16x8()) as u8 }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_bitmask as u16x8_bitmask;
 
 /// Converts two input vectors into a smaller lane vector by narrowing each
@@ -2314,6 +2522,7 @@ pub use i16x8_bitmask as u16x8_bitmask;
 #[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.narrow_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
     unsafe { llvm_narrow_i16x8_s(a.as_i32x4(), b.as_i32x4()).v128() }
 }
@@ -2327,6 +2536,7 @@ pub fn i16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.narrow_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
     unsafe { llvm_narrow_i16x8_u(a.as_i32x4(), b.as_i32x4()).v128() }
 }
@@ -2337,6 +2547,7 @@ pub fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extend_low_i8x16_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extend_low_i8x16(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
@@ -2354,6 +2565,7 @@ pub fn i16x8_extend_low_i8x16(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extend_high_i8x16_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extend_high_i8x16(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
@@ -2371,6 +2583,7 @@ pub fn i16x8_extend_high_i8x16(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extend_low_i8x16_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extend_low_u8x16(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
@@ -2382,6 +2595,7 @@ pub fn i16x8_extend_low_u8x16(a: v128) -> v128 {
     }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_extend_low_u8x16 as u16x8_extend_low_u8x16;
 
 /// Converts high half of the smaller lane vector to a larger lane
@@ -2390,6 +2604,7 @@ pub use i16x8_extend_low_u8x16 as u16x8_extend_low_u8x16;
 #[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extend_high_i8x16_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extend_high_u8x16(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
@@ -2401,6 +2616,7 @@ pub fn i16x8_extend_high_u8x16(a: v128) -> v128 {
     }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_extend_high_u8x16 as u16x8_extend_high_u8x16;
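A sketch of the widen/compute/narrow pattern these conversions enable (illustrative only; `i8x16_narrow_i16x8` and `i16x8_add` come from elsewhere in this module):

```rust,ignore
use core::arch::wasm32::*;

// Doubles sixteen i8 lanes in 16-bit precision, then narrows back with
// signed saturation instead of wrapping on overflow.
fn double_saturating(v: v128) -> v128 {
    let lo = i16x8_extend_low_i8x16(v);
    let hi = i16x8_extend_high_i8x16(v);
    i8x16_narrow_i16x8(i16x8_add(lo, lo), i16x8_add(hi, hi))
}
```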
 
 /// Shifts each lane to the left by the specified number of bits.
@@ -2411,10 +2627,12 @@ pub use i16x8_extend_high_u8x16 as u16x8_extend_high_u8x16;
 #[cfg_attr(test, assert_instr(i16x8.shl))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.shl"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_shl(a: v128, amt: u32) -> v128 {
     unsafe { simd_shl(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_shl as u16x8_shl;
 
 /// Shifts each lane to the right by the specified number of bits, sign
@@ -2426,6 +2644,7 @@ pub use i16x8_shl as u16x8_shl;
 #[cfg_attr(test, assert_instr(i16x8.shr_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.shr_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_shr(a: v128, amt: u32) -> v128 {
     unsafe { simd_shr(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() }
 }
@@ -2439,6 +2658,7 @@ pub fn i16x8_shr(a: v128, amt: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.shr_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.shr_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_shr(a: v128, amt: u32) -> v128 {
     unsafe { simd_shr(a.as_u16x8(), simd::u16x8::splat(amt as u16)).v128() }
 }
@@ -2448,10 +2668,12 @@ pub fn u16x8_shr(a: v128, amt: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.add))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.add"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_add(a: v128, b: v128) -> v128 {
     unsafe { simd_add(a.as_i16x8(), b.as_i16x8()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_add as u16x8_add;
 
 /// Adds two 128-bit vectors as if they were two packed eight 16-bit signed
@@ -2460,6 +2682,7 @@ pub use i16x8_add as u16x8_add;
 #[cfg_attr(test, assert_instr(i16x8.add_sat_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.add_sat_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_add_sat(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_add_sat_s(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2470,6 +2693,7 @@ pub fn i16x8_add_sat(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.add_sat_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.add_sat_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_add_sat(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_add_sat_u(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2479,10 +2703,12 @@ pub fn u16x8_add_sat(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.sub))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.sub"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_sub(a: v128, b: v128) -> v128 {
     unsafe { simd_sub(a.as_i16x8(), b.as_i16x8()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_sub as u16x8_sub;
 
 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit
@@ -2491,6 +2717,7 @@ pub use i16x8_sub as u16x8_sub;
 #[cfg_attr(test, assert_instr(i16x8.sub_sat_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.sub_sat_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_sub_sat(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_sub_sat_s(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2501,6 +2728,7 @@ pub fn i16x8_sub_sat(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.sub_sat_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.sub_sat_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_sub_sat(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_sub_sat_u(a.as_i16x8(), b.as_i16x8()).v128() }
 }
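The signed/unsigned split only matters for the right shifts; a minimal sketch (assuming `i16x8_splat` and the `*_extract_lane` accessors stabilized earlier in the module):

```rust,ignore
use core::arch::wasm32::*;

fn shift_direction_demo() {
    let v = i16x8_splat(-8);
    // Arithmetic shift keeps the sign bit: -8 >> 1 == -4.
    assert_eq!(i16x8_extract_lane::<0>(i16x8_shr(v, 1)), -4);
    // Logical shift fills with zeros: 0xfff8 >> 1 == 0x7ffc.
    assert_eq!(u16x8_extract_lane::<0>(u16x8_shr(v, 1)), 0x7ffc);
}
```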
@@ -2511,10 +2739,12 @@ pub fn u16x8_sub_sat(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.mul))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.mul"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_mul(a: v128, b: v128) -> v128 {
     unsafe { simd_mul(a.as_i16x8(), b.as_i16x8()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_mul as u16x8_mul;
 
 /// Compares lane-wise signed integers, and returns the minimum of
@@ -2523,6 +2753,7 @@ pub use i16x8_mul as u16x8_mul;
 #[cfg_attr(test, assert_instr(i16x8.min_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.min_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_min(a: v128, b: v128) -> v128 {
     let a = a.as_i16x8();
     let b = b.as_i16x8();
@@ -2535,6 +2766,7 @@ pub fn i16x8_min(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.min_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.min_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_min(a: v128, b: v128) -> v128 {
     let a = a.as_u16x8();
     let b = b.as_u16x8();
@@ -2547,6 +2779,7 @@ pub fn u16x8_min(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.max_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.max_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_max(a: v128, b: v128) -> v128 {
     let a = a.as_i16x8();
     let b = b.as_i16x8();
@@ -2559,6 +2792,7 @@ pub fn i16x8_max(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.max_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.max_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_max(a: v128, b: v128) -> v128 {
     let a = a.as_u16x8();
     let b = b.as_u16x8();
@@ -2570,6 +2804,7 @@ pub fn u16x8_max(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.avgr_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.avgr_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_avgr(a: v128, b: v128) -> v128 {
     unsafe { llvm_avgr_u_i16x8(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2582,6 +2817,7 @@ pub fn u16x8_avgr(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extmul_low_i8x16_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_extmul_low_i8x16_s(a.as_i8x16(), b.as_i8x16()).v128() }
 }
@@ -2594,6 +2830,7 @@ pub fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extmul_high_i8x16_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_extmul_high_i8x16_s(a.as_i8x16(), b.as_i8x16()).v128() }
 }
@@ -2606,10 +2843,12 @@ pub fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extmul_low_i8x16_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extmul_low_u8x16(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_extmul_low_i8x16_u(a.as_i8x16(), b.as_i8x16()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_extmul_low_u8x16 as u16x8_extmul_low_u8x16;
 
 /// Lane-wise integer extended multiplication producing twice wider result than
@@ -2620,10 +2859,12 @@ pub use i16x8_extmul_low_u8x16 as u16x8_extmul_low_u8x16;
 // #[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extmul_high_i8x16_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extmul_high_u8x16(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_extmul_high_i8x16_u(a.as_i8x16(), b.as_i8x16()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_extmul_high_u8x16 as u16x8_extmul_high_u8x16;
 
 /// Lane-wise integer extended pairwise addition producing extended results
@@ -2632,6 +2873,7 @@ pub use i16x8_extmul_high_u8x16 as u16x8_extmul_high_u8x16;
 // #[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extadd_pairwise_i16x8_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extadd_pairwise_i16x8(a: v128) -> v128 {
     unsafe { llvm_i32x4_extadd_pairwise_i16x8_s(a.as_i16x8()).v128() }
 }
@@ -2642,10 +2884,12 @@ pub fn i32x4_extadd_pairwise_i16x8(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_u))] // FIXME wasmtime
 #[doc(alias("i32x4.extadd_pairwise_i16x8_u"))]
 #[target_feature(enable = "simd128")]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extadd_pairwise_u16x8(a: v128) -> v128 {
     unsafe { llvm_i32x4_extadd_pairwise_i16x8_u(a.as_i16x8()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_extadd_pairwise_u16x8 as u32x4_extadd_pairwise_u16x8;
 
 /// Lane-wise wrapping absolute value.
@@ -2653,6 +2897,7 @@ pub use i32x4_extadd_pairwise_u16x8 as u32x4_extadd_pairwise_u16x8;
 #[cfg_attr(test, assert_instr(i32x4.abs))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.abs"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_abs(a: v128) -> v128 {
     let a = a.as_i32x4();
     let zero = simd::i32x4::splat(0);
@@ -2666,6 +2911,7 @@ pub fn i32x4_abs(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.neg))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.neg"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_neg(a: v128) -> v128 {
     unsafe { simd_mul(a.as_i32x4(), simd::i32x4::splat(-1)).v128() }
 }
@@ -2675,10 +2921,12 @@ pub fn i32x4_neg(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.all_true))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.all_true"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_all_true(a: v128) -> bool {
     unsafe { llvm_i32x4_all_true(a.as_i32x4()) != 0 }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_all_true as u32x4_all_true;
 
 /// Extracts the high bit for each lane in `a` and produces a scalar mask with
@@ -2687,10 +2935,12 @@ pub use i32x4_all_true as u32x4_all_true;
 #[cfg_attr(test, assert_instr(i32x4.bitmask))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.bitmask"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_bitmask(a: v128) -> u8 {
     unsafe { llvm_bitmask_i32x4(a.as_i32x4()) as u8 }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_bitmask as u32x4_bitmask;
 
 /// Converts low half of the smaller lane vector to a larger lane
@@ -2699,6 +2949,7 @@ pub use i32x4_bitmask as u32x4_bitmask;
 #[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extend_low_i16x8_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extend_low_i16x8(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
@@ -2716,6 +2967,7 @@ pub fn i32x4_extend_low_i16x8(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extend_high_i16x8_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extend_high_i16x8(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
@@ -2733,6 +2985,7 @@ pub fn i32x4_extend_high_i16x8(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extend_low_i16x8_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extend_low_u16x8(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
@@ -2744,6 +2997,7 @@ pub fn i32x4_extend_low_u16x8(a: v128) -> v128 {
     }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_extend_low_u16x8 as u32x4_extend_low_u16x8;
 
 /// Converts high half of the smaller lane vector to a larger lane
@@ -2752,6 +3006,7 @@ pub use i32x4_extend_low_u16x8 as u32x4_extend_low_u16x8;
 #[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extend_high_i16x8_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extend_high_u16x8(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
@@ -2763,6 +3018,7 @@ pub fn i32x4_extend_high_u16x8(a: v128) -> v128 {
     }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_extend_high_u16x8 as u32x4_extend_high_u16x8;
 
 /// Shifts each lane to the left by the specified number of bits.
@@ -2773,10 +3029,12 @@ pub use i32x4_extend_high_u16x8 as u32x4_extend_high_u16x8;
 #[cfg_attr(test, assert_instr(i32x4.shl))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.shl"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_shl(a: v128, amt: u32) -> v128 {
     unsafe { simd_shl(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_shl as u32x4_shl;
 
 /// Shifts each lane to the right by the specified number of bits, sign
@@ -2788,6 +3046,7 @@ pub use i32x4_shl as u32x4_shl;
 #[cfg_attr(test, assert_instr(i32x4.shr_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.shr_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_shr(a: v128, amt: u32) -> v128 {
     unsafe { simd_shr(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() }
 }
@@ -2801,6 +3060,7 @@ pub fn i32x4_shr(a: v128, amt: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.shr_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.shr_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_shr(a: v128, amt: u32) -> v128 {
     unsafe { simd_shr(a.as_u32x4(), simd::u32x4::splat(amt as u32)).v128() }
 }
@@ -2810,10 +3070,12 @@ pub fn u32x4_shr(a: v128, amt: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.add))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.add"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_add(a: v128, b: v128) -> v128 {
     unsafe { simd_add(a.as_i32x4(), b.as_i32x4()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_add as u32x4_add;
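A sketch of what the `bitmask` results above are typically used for (illustrative only; `i32x4_eq` and `i32x4_splat` are stabilized earlier in this module):

```rust,ignore
use core::arch::wasm32::*;

// Returns the index of the first 32-bit lane equal to `needle`, if any.
fn first_match(haystack: v128, needle: i32) -> Option<usize> {
    let eq = i32x4_eq(haystack, i32x4_splat(needle));
    let mask = i32x4_bitmask(eq); // bit N is set iff lane N compared equal
    if mask == 0 {
        None
    } else {
        Some(mask.trailing_zeros() as usize)
    }
}
```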
 
 /// Subtracts two 128-bit vectors as if they were two packed four 32-bit integers.
@@ -2821,10 +3083,12 @@ pub use i32x4_add as u32x4_add;
 #[cfg_attr(test, assert_instr(i32x4.sub))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.sub"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_sub(a: v128, b: v128) -> v128 {
     unsafe { simd_sub(a.as_i32x4(), b.as_i32x4()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_sub as u32x4_sub;
 
 /// Multiplies two 128-bit vectors as if they were two packed four 32-bit
@@ -2833,10 +3097,12 @@ pub use i32x4_sub as u32x4_sub;
 #[cfg_attr(test, assert_instr(i32x4.mul))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.mul"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_mul(a: v128, b: v128) -> v128 {
     unsafe { simd_mul(a.as_i32x4(), b.as_i32x4()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_mul as u32x4_mul;
 
 /// Compares lane-wise signed integers, and returns the minimum of
@@ -2845,6 +3111,7 @@ pub use i32x4_mul as u32x4_mul;
 #[cfg_attr(test, assert_instr(i32x4.min_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.min_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_min(a: v128, b: v128) -> v128 {
     let a = a.as_i32x4();
     let b = b.as_i32x4();
@@ -2857,6 +3124,7 @@ pub fn i32x4_min(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.min_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.min_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_min(a: v128, b: v128) -> v128 {
     let a = a.as_u32x4();
     let b = b.as_u32x4();
@@ -2869,6 +3137,7 @@ pub fn u32x4_min(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.max_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.max_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_max(a: v128, b: v128) -> v128 {
     let a = a.as_i32x4();
     let b = b.as_i32x4();
@@ -2881,6 +3150,7 @@ pub fn i32x4_max(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.max_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.max_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_max(a: v128, b: v128) -> v128 {
     let a = a.as_u32x4();
     let b = b.as_u32x4();
@@ -2893,6 +3163,7 @@ pub fn u32x4_max(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.dot_i16x8_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.dot_i16x8_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 {
     unsafe { llvm_i32x4_dot_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() }
 }
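A sketch of `i32x4_dot_i16x8`, which multiplies corresponding i16 lanes and sums adjacent pairs of products into i32 lanes (the `i16x8` constructor and `i32x4_extract_lane` are stabilized elsewhere in this patch):

```rust,ignore
use core::arch::wasm32::*;

fn dot_demo() {
    let a = i16x8(1, 2, 3, 4, 5, 6, 7, 8);
    let b = i16x8(1, 1, 1, 1, 1, 1, 1, 1);
    let d = i32x4_dot_i16x8(a, b);
    assert_eq!(i32x4_extract_lane::<0>(d), 1 + 2); // products of lanes 0 and 1
    assert_eq!(i32x4_extract_lane::<3>(d), 7 + 8); // products of lanes 6 and 7
}
```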
@@ -2905,6 +3176,7 @@ pub fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extmul_low_i16x8_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 {
     unsafe { llvm_i32x4_extmul_low_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2917,6 +3189,7 @@ pub fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extmul_high_i16x8_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 {
     unsafe { llvm_i32x4_extmul_high_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2929,10 +3202,12 @@ pub fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extmul_low_i16x8_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extmul_low_u16x8(a: v128, b: v128) -> v128 {
     unsafe { llvm_i32x4_extmul_low_i16x8_u(a.as_i16x8(), b.as_i16x8()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_extmul_low_u16x8 as u32x4_extmul_low_u16x8;
 
 /// Lane-wise integer extended multiplication producing twice wider result than
@@ -2943,10 +3218,12 @@ pub use i32x4_extmul_low_u16x8 as u32x4_extmul_low_u16x8;
 // #[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extmul_high_i16x8_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extmul_high_u16x8(a: v128, b: v128) -> v128 {
     unsafe { llvm_i32x4_extmul_high_i16x8_u(a.as_i16x8(), b.as_i16x8()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_extmul_high_u16x8 as u32x4_extmul_high_u16x8;
 
 /// Lane-wise wrapping absolute value.
@@ -2954,6 +3231,7 @@ pub use i32x4_extmul_high_u16x8 as u32x4_extmul_high_u16x8;
 // #[cfg_attr(test, assert_instr(i64x2.abs))] // FIXME llvm
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.abs"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_abs(a: v128) -> v128 {
     let a = a.as_i64x2();
     let zero = simd::i64x2::splat(0);
@@ -2967,6 +3245,7 @@ pub fn i64x2_abs(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i64x2.neg))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.neg"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_neg(a: v128) -> v128 {
     unsafe { simd_mul(a.as_i64x2(), simd::i64x2::splat(-1)).v128() }
 }
@@ -2976,10 +3255,12 @@ pub fn i64x2_neg(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i64x2.all_true))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.all_true"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_all_true(a: v128) -> bool {
     unsafe { llvm_i64x2_all_true(a.as_i64x2()) != 0 }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_all_true as u64x2_all_true;
 
 /// Extracts the high bit for each lane in `a` and produces a scalar mask with
@@ -2988,10 +3269,12 @@ pub use i64x2_all_true as u64x2_all_true;
 #[cfg_attr(test, assert_instr(i64x2.bitmask))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.bitmask"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_bitmask(a: v128) -> u8 {
     unsafe { llvm_bitmask_i64x2(a.as_i64x2()) as u8 }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_bitmask as u64x2_bitmask;
 
 /// Converts low half of the smaller lane vector to a larger lane
@@ -3000,6 +3283,7 @@ pub use i64x2_bitmask as u64x2_bitmask;
 // #[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extend_low_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extend_low_i32x4(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [0, 1]))
@@ -3013,6 +3297,7 @@ pub fn i64x2_extend_low_i32x4(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extend_high_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extend_high_i32x4(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [2, 3]))
@@ -3026,6 +3311,7 @@ pub fn i64x2_extend_high_i32x4(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extend_low_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extend_low_u32x4(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [0, 1]))
@@ -3033,6 +3319,7 @@ pub fn i64x2_extend_low_u32x4(a: v128) -> v128 {
     }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_extend_low_u32x4 as u64x2_extend_low_u32x4;
 
 /// Converts high half of the smaller lane vector to a larger lane
@@ -3041,6 +3328,7 @@ pub use i64x2_extend_low_u32x4 as u64x2_extend_low_u32x4;
 // #[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extend_high_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extend_high_u32x4(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [2, 3]))
@@ -3048,6 +3336,7 @@ pub fn i64x2_extend_high_u32x4(a: v128) -> v128 {
     }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_extend_high_u32x4 as u64x2_extend_high_u32x4;
 
 /// Shifts each lane to the left by the specified number of bits.
@@ -3058,10 +3347,12 @@ pub use i64x2_extend_high_u32x4 as u64x2_extend_high_u32x4;
 #[cfg_attr(test, assert_instr(i64x2.shl))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.shl"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_shl(a: v128, amt: u32) -> v128 {
     unsafe { simd_shl(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_shl as u64x2_shl;
 
 /// Shifts each lane to the right by the specified number of bits, sign
@@ -3073,6 +3364,7 @@ pub use i64x2_shl as u64x2_shl;
 #[cfg_attr(test, assert_instr(i64x2.shr_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.shr_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_shr(a: v128, amt: u32) -> v128 {
     unsafe { simd_shr(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() }
 }
@@ -3086,6 +3378,7 @@ pub fn i64x2_shr(a: v128, amt: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i64x2.shr_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.shr_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u64x2_shr(a: v128, amt: u32) -> v128 {
     unsafe { simd_shr(a.as_u64x2(), simd::u64x2::splat(amt as u64)).v128() }
 }
@@ -3095,10 +3388,12 @@ pub fn u64x2_shr(a: v128, amt: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i64x2.add))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.add"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_add(a: v128, b: v128) -> v128 {
     unsafe { simd_add(a.as_i64x2(), b.as_i64x2()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_add as u64x2_add;
 
 /// Subtracts two 128-bit vectors as if they were two packed two 64-bit integers.
@@ -3106,10 +3401,12 @@ pub use i64x2_add as u64x2_add;
 #[cfg_attr(test, assert_instr(i64x2.sub))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.sub"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_sub(a: v128, b: v128) -> v128 {
     unsafe { simd_sub(a.as_i64x2(), b.as_i64x2()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_sub as u64x2_sub;
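These 64-bit lane operations wrap on overflow, like the scalar `wrapping_*` methods; a minimal sketch (assuming the `i64x2_splat`/`i64x2_extract_lane` helpers from earlier in the module):

```rust,ignore
use core::arch::wasm32::*;

fn wrap64_demo() {
    let sum = i64x2_add(i64x2_splat(i64::MAX), i64x2_splat(1));
    assert_eq!(i64x2_extract_lane::<0>(sum), i64::MIN); // wraps around
}
```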
 
 /// Multiplies two 128-bit vectors as if they were two packed two 64-bit integers.
@@ -3117,10 +3414,12 @@ pub use i64x2_sub as u64x2_sub;
 #[cfg_attr(test, assert_instr(i64x2.mul))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.mul"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_mul(a: v128, b: v128) -> v128 {
     unsafe { simd_mul(a.as_i64x2(), b.as_i64x2()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_mul as u64x2_mul;
 
 /// Lane-wise integer extended multiplication producing twice wider result than
@@ -3131,6 +3430,7 @@ pub use i64x2_mul as u64x2_mul;
 // #[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extmul_low_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 {
     unsafe { llvm_i64x2_extmul_low_i32x4_s(a.as_i32x4(), b.as_i32x4()).v128() }
 }
@@ -3143,6 +3443,7 @@ pub fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extmul_high_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 {
     unsafe { llvm_i64x2_extmul_high_i32x4_s(a.as_i32x4(), b.as_i32x4()).v128() }
 }
@@ -3155,10 +3456,12 @@ pub fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extmul_low_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extmul_low_u32x4(a: v128, b: v128) -> v128 {
     unsafe { llvm_i64x2_extmul_low_i32x4_u(a.as_i32x4(), b.as_i32x4()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_extmul_low_u32x4 as u64x2_extmul_low_u32x4;
 
 /// Lane-wise integer extended multiplication producing twice wider result than
@@ -3169,10 +3472,12 @@ pub use i64x2_extmul_low_u32x4 as u64x2_extmul_low_u32x4;
 // #[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extmul_high_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extmul_high_u32x4(a: v128, b: v128) -> v128 {
     unsafe { llvm_i64x2_extmul_high_i32x4_u(a.as_i32x4(), b.as_i32x4()).v128() }
 }
 
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_extmul_high_u32x4 as u64x2_extmul_high_u32x4;
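Together, the low/high `extmul` pair gives a full widening multiply without a separate extend step; a sketch:

```rust,ignore
use core::arch::wasm32::*;

// Full 32x32 -> 64-bit lane products: `lo` holds lanes 0-1, `hi` lanes 2-3.
fn widening_mul_i32x4(a: v128, b: v128) -> (v128, v128) {
    let lo = i64x2_extmul_low_i32x4(a, b);
    let hi = i64x2_extmul_high_i32x4(a, b);
    (lo, hi)
}
```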
 
 /// Lane-wise rounding to the nearest integral value not smaller than the input.
@@ -3180,6 +3485,7 @@ pub use i64x2_extmul_high_u32x4 as u64x2_extmul_high_u32x4;
 #[cfg_attr(test, assert_instr(f32x4.ceil))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.ceil"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_ceil(a: v128) -> v128 {
     unsafe { llvm_f32x4_ceil(a.as_f32x4()).v128() }
 }
@@ -3189,6 +3495,7 @@ pub fn f32x4_ceil(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.floor))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.floor"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_floor(a: v128) -> v128 {
     unsafe { llvm_f32x4_floor(a.as_f32x4()).v128() }
 }
@@ -3199,6 +3506,7 @@ pub fn f32x4_floor(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.trunc))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.trunc"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_trunc(a: v128) -> v128 {
     unsafe { llvm_f32x4_trunc(a.as_f32x4()).v128() }
 }
@@ -3209,6 +3517,7 @@ pub fn f32x4_trunc(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.nearest))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.nearest"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_nearest(a: v128) -> v128 {
     unsafe { llvm_f32x4_nearest(a.as_f32x4()).v128() }
 }
@@ -3219,6 +3528,7 @@ pub fn f32x4_nearest(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.abs))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.abs"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_abs(a: v128) -> v128 {
     unsafe { llvm_f32x4_abs(a.as_f32x4()).v128() }
 }
@@ -3229,6 +3539,7 @@ pub fn f32x4_abs(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.neg))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.neg"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_neg(a: v128) -> v128 {
     f32x4_mul(a, f32x4_splat(-1.))
 }
@@ -3239,6 +3550,7 @@ pub fn f32x4_neg(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.sqrt))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.sqrt"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_sqrt(a: v128) -> v128 {
     unsafe { llvm_f32x4_sqrt(a.as_f32x4()).v128() }
 }
@@ -3249,6 +3561,7 @@ pub fn f32x4_sqrt(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.add))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.add"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_add(a: v128, b: v128) -> v128 {
     unsafe { simd_add(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3259,6 +3572,7 @@ pub fn f32x4_add(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.sub))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.sub"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_sub(a: v128, b: v128) -> v128 {
     unsafe { simd_sub(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3269,6 +3583,7 @@ pub fn f32x4_sub(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.mul))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.mul"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_mul(a: v128, b: v128) -> v128 {
     unsafe { simd_mul(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3279,6 +3594,7 @@ pub fn f32x4_mul(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.div))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.div"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_div(a: v128, b: v128) -> v128 {
     unsafe { simd_div(a.as_f32x4(), b.as_f32x4()).v128() }
 }
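A sketch combining the lane-wise float arithmetic above (illustrative only; `f32x4_splat` is stabilized earlier in the module):

```rust,ignore
use core::arch::wasm32::*;

// Elementwise x / sqrt(x*x + 1), i.e. four scalar computations in one shot.
fn squash(x: v128) -> v128 {
    let x2 = f32x4_mul(x, x);
    f32x4_div(x, f32x4_sqrt(f32x4_add(x2, f32x4_splat(1.0))))
}
```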
@@ -3289,6 +3605,7 @@ pub fn f32x4_div(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.min))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.min"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_min(a: v128, b: v128) -> v128 {
     unsafe { llvm_f32x4_min(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3299,6 +3616,7 @@ pub fn f32x4_min(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.max))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.max"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_max(a: v128, b: v128) -> v128 {
     unsafe { llvm_f32x4_max(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3308,6 +3626,7 @@ pub fn f32x4_max(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.pmin))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.pmin"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_pmin(a: v128, b: v128) -> v128 {
     unsafe { llvm_f32x4_pmin(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3317,6 +3636,7 @@ pub fn f32x4_pmin(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.pmax))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.pmax"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_pmax(a: v128, b: v128) -> v128 {
     unsafe { llvm_f32x4_pmax(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3326,6 +3646,7 @@ pub fn f32x4_pmax(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.ceil))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.ceil"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_ceil(a: v128) -> v128 {
     unsafe { llvm_f64x2_ceil(a.as_f64x2()).v128() }
 }
@@ -3335,6 +3656,7 @@ pub fn f64x2_ceil(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.floor))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.floor"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_floor(a: v128) -> v128 {
     unsafe { llvm_f64x2_floor(a.as_f64x2()).v128() }
 }
@@ -3345,6 +3667,7 @@ pub fn f64x2_floor(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.trunc))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.trunc"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_trunc(a: v128) -> v128 {
     unsafe { llvm_f64x2_trunc(a.as_f64x2()).v128() }
 }
@@ -3355,6 +3678,7 @@ pub fn f64x2_trunc(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.nearest))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.nearest"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_nearest(a: v128) -> v128 {
     unsafe { llvm_f64x2_nearest(a.as_f64x2()).v128() }
 }
@@ -3365,6 +3689,7 @@ pub fn f64x2_nearest(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.abs))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.abs"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_abs(a: v128) -> v128 {
     unsafe { llvm_f64x2_abs(a.as_f64x2()).v128() }
 }
@@ -3375,6 +3700,7 @@ pub fn f64x2_abs(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.neg))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.neg"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_neg(a: v128) -> v128 {
     f64x2_mul(a, f64x2_splat(-1.0))
 }
@@ -3385,6 +3711,7 @@ pub fn f64x2_neg(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.sqrt))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.sqrt"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_sqrt(a: v128) -> v128 {
     unsafe { llvm_f64x2_sqrt(a.as_f64x2()).v128() }
 }
@@ -3395,6 +3722,7 @@ pub fn f64x2_sqrt(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.add))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.add"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_add(a: v128, b: v128) -> v128 {
     unsafe { simd_add(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3405,6 +3733,7 @@ pub fn f64x2_add(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.sub))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.sub"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_sub(a: v128, b: v128) -> v128 {
     unsafe { simd_sub(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3415,6 +3744,7 @@ pub fn f64x2_sub(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.mul))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.mul"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_mul(a: v128, b: v128) -> v128 {
     unsafe { simd_mul(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3425,6 +3755,7 @@ pub fn f64x2_mul(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.div))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.div"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_div(a: v128, b: v128) -> v128 {
     unsafe { simd_div(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3435,6 +3766,7 @@ pub fn f64x2_div(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.min))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.min"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_min(a: v128, b: v128) -> v128 {
     unsafe { llvm_f64x2_min(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3445,6 +3777,7 @@ pub fn f64x2_min(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.max))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.max"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_max(a: v128, b: v128) -> v128 {
     unsafe { llvm_f64x2_max(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3454,6 +3787,7 @@ pub fn f64x2_max(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.pmin))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.pmin"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_pmin(a: v128, b: v128) -> v128 {
     unsafe { llvm_f64x2_pmin(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3463,6 +3797,7 @@ pub fn f64x2_pmin(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.pmax))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.pmax"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_pmax(a: v128, b: v128) -> v128 {
     unsafe { llvm_f64x2_pmax(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3476,6 +3811,7 @@ pub fn f64x2_pmax(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.trunc_sat_f32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_trunc_sat_f32x4(a: v128) -> v128 {
     unsafe { llvm_i32x4_trunc_sat_f32x4_s(a.as_f32x4()).v128() }
 }
@@ -3489,6 +3825,7 @@ pub fn i32x4_trunc_sat_f32x4(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.trunc_sat_f32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_trunc_sat_f32x4(a: v128) -> v128 {
     unsafe { llvm_i32x4_trunc_sat_f32x4_u(a.as_f32x4()).v128() }
 }
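The `trunc_sat` conversions are total: NaN maps to 0 and out-of-range values clamp to the integer range. A sketch (assuming the `f32x4` constructor and `i32x4_extract_lane` stabilized elsewhere in this patch):

```rust,ignore
use core::arch::wasm32::*;

fn trunc_sat_demo() {
    let v = f32x4(-1.5, 0.5, 3.0e9, f32::NAN);
    let t = i32x4_trunc_sat_f32x4(v);
    assert_eq!(i32x4_extract_lane::<0>(t), -1); // truncates toward zero
    assert_eq!(i32x4_extract_lane::<2>(t), i32::MAX); // saturates
    assert_eq!(i32x4_extract_lane::<3>(t), 0); // NaN becomes 0
}
```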
@@ -3499,6 +3836,7 @@ pub fn u32x4_trunc_sat_f32x4(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.convert_i32x4_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.convert_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_convert_i32x4(a: v128) -> v128 {
     unsafe { simd_cast::<_, simd::f32x4>(a.as_i32x4()).v128() }
 }
@@ -3509,6 +3847,7 @@ pub fn f32x4_convert_i32x4(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.convert_i32x4_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.convert_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_convert_u32x4(a: v128) -> v128 {
     unsafe { simd_cast::<_, simd::f32x4>(a.as_u32x4()).v128() }
 }
@@ -3525,6 +3864,7 @@ pub fn f32x4_convert_u32x4(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_s_zero))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.trunc_sat_f64x2_s_zero"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
     let ret: simd::i32x4 = unsafe {
         simd_shuffle4!(
@@ -3548,6 +3888,7 @@ pub fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_u_zero))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.trunc_sat_f64x2_u_zero"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
     let ret: simd::i32x4 = unsafe {
         simd_shuffle4!(
@@ -3564,6 +3905,7 @@ pub fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.convert_low_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_convert_low_i32x4(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::i32x2, simd::f64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [0, 1],))
@@ -3576,6 +3918,7 @@ pub fn f64x2_convert_low_i32x4(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.convert_low_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_convert_low_u32x4(a: v128) -> v128 {
     unsafe {
         simd_cast::<simd::u32x2, simd::f64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [0, 1],))
@@ -3592,6 +3935,7 @@ pub fn f64x2_convert_low_u32x4(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(f32x4.demote_f64x2_zero))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.demote_f64x2_zero"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_demote_f64x2_zero(a: v128) -> v128 {
     unsafe { llvm_f32x4_demote_f64x2_zero(a.as_f64x2()).v128() }
 }
@@ -3602,6 +3946,7 @@ pub fn f32x4_demote_f64x2_zero(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(f64x2.promote_low_f32x4))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.promote_low_f32x4"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_promote_low_f32x4(a: v128) -> v128 {
     unsafe { llvm_f64x2_promote_low_f32x4(a.as_f32x4()).v128() }
 }
@@ -3739,6 +4084,18 @@ pub mod tests {
         const _: v128 = i64x2(0, 1);
         const _: v128 = f32x4(0., 1., 2., 3.);
         const _: v128 = f64x2(0., 1.);
+
+        let bytes: [i16; 8] = unsafe { mem::transmute(i16x8(-1, -2, -3, -4, -5, -6, -7, -8)) };
+        assert_eq!(bytes, [-1, -2, -3, -4, -5, -6, -7, -8]);
+        let bytes: [i8; 16] = unsafe {
+            mem::transmute(i8x16(
+                -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16,
+            ))
+        };
+        assert_eq!(
+            bytes,
+            [-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16]
+        );
     }
 
     #[test]
diff --git a/examples/hex.rs b/examples/hex.rs
index 8d41517f33..812836d661 100644
--- a/examples/hex.rs
+++ b/examples/hex.rs
@@ -14,7 +14,6 @@
 #![feature(stdsimd, wasm_target_feature)]
 #![cfg_attr(test, feature(test))]
-#![cfg_attr(target_arch = "wasm32", feature(wasm_simd))]
 #![allow(
     clippy::unwrap_used,
     clippy::print_stdout,