diff --git a/library/core/src/intrinsics/simd.rs b/library/core/src/intrinsics/simd.rs index c56e04bfc2d90..722a765cd01ee 100644 --- a/library/core/src/intrinsics/simd.rs +++ b/library/core/src/intrinsics/simd.rs @@ -64,21 +64,21 @@ pub unsafe fn simd_extract_dyn(x: T, idx: u32) -> U { /// `T` must be a vector of integers or floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_add(x: T, y: T) -> T; +pub const unsafe fn simd_add(x: T, y: T) -> T; /// Subtracts `rhs` from `lhs` elementwise. /// /// `T` must be a vector of integers or floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_sub(lhs: T, rhs: T) -> T; +pub const unsafe fn simd_sub(lhs: T, rhs: T) -> T; /// Multiplies two simd vectors elementwise. /// /// `T` must be a vector of integers or floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_mul(x: T, y: T) -> T; +pub const unsafe fn simd_mul(x: T, y: T) -> T; /// Divides `lhs` by `rhs` elementwise. /// @@ -89,7 +89,7 @@ pub unsafe fn simd_mul(x: T, y: T) -> T; /// Additionally for signed integers, `::MIN / -1` is undefined behavior. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_div(lhs: T, rhs: T) -> T; +pub const unsafe fn simd_div(lhs: T, rhs: T) -> T; /// Returns remainder of two vectors elementwise. /// @@ -100,7 +100,7 @@ pub unsafe fn simd_div(lhs: T, rhs: T) -> T; /// Additionally for signed integers, `::MIN / -1` is undefined behavior. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_rem(lhs: T, rhs: T) -> T; +pub const unsafe fn simd_rem(lhs: T, rhs: T) -> T; /// Shifts vector left elementwise, with UB on overflow. /// @@ -113,7 +113,7 @@ pub unsafe fn simd_rem(lhs: T, rhs: T) -> T; /// Each element of `rhs` must be less than `::BITS`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_shl(lhs: T, rhs: T) -> T; +pub const unsafe fn simd_shl(lhs: T, rhs: T) -> T; /// Shifts vector right elementwise, with UB on overflow. 
/// @@ -126,7 +126,7 @@ pub unsafe fn simd_shl(lhs: T, rhs: T) -> T; /// Each element of `rhs` must be less than `::BITS`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_shr(lhs: T, rhs: T) -> T; +pub const unsafe fn simd_shr(lhs: T, rhs: T) -> T; /// Funnel Shifts vector left elementwise, with UB on overflow. /// @@ -143,7 +143,7 @@ pub unsafe fn simd_shr(lhs: T, rhs: T) -> T; /// Each element of `shift` must be less than `::BITS`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_funnel_shl(a: T, b: T, shift: T) -> T; +pub const unsafe fn simd_funnel_shl(a: T, b: T, shift: T) -> T; /// Funnel Shifts vector right elementwise, with UB on overflow. /// @@ -160,28 +160,28 @@ pub unsafe fn simd_funnel_shl(a: T, b: T, shift: T) -> T; /// Each element of `shift` must be less than `::BITS`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_funnel_shr(a: T, b: T, shift: T) -> T; +pub const unsafe fn simd_funnel_shr(a: T, b: T, shift: T) -> T; /// "And"s vectors elementwise. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_and(x: T, y: T) -> T; +pub const unsafe fn simd_and(x: T, y: T) -> T; /// "Ors" vectors elementwise. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_or(x: T, y: T) -> T; +pub const unsafe fn simd_or(x: T, y: T) -> T; /// "Exclusive ors" vectors elementwise. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_xor(x: T, y: T) -> T; +pub const unsafe fn simd_xor(x: T, y: T) -> T; /// Numerically casts a vector, elementwise. /// @@ -202,7 +202,7 @@ pub unsafe fn simd_xor(x: T, y: T) -> T; /// * Be representable in the return type, after truncating off its fractional part #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_cast(x: T) -> U; +pub const unsafe fn simd_cast(x: T) -> U; /// Numerically casts a vector, elementwise. 
/// @@ -216,7 +216,7 @@ pub unsafe fn simd_cast(x: T) -> U; /// Otherwise, truncates or extends the value, maintaining the sign for signed integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_as(x: T) -> U; +pub const unsafe fn simd_as(x: T) -> U; /// Negates a vector elementwise. /// @@ -225,14 +225,14 @@ pub unsafe fn simd_as(x: T) -> U; /// Rust panics for `-::Min` due to overflow, but it is not UB with this intrinsic. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_neg(x: T) -> T; +pub const unsafe fn simd_neg(x: T) -> T; /// Returns absolute value of a vector, elementwise. /// /// `T` must be a vector of floating-point primitive types. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_fabs(x: T) -> T; +pub const unsafe fn simd_fabs(x: T) -> T; /// Returns the minimum of two vectors, elementwise. /// @@ -241,7 +241,7 @@ pub unsafe fn simd_fabs(x: T) -> T; /// Follows IEEE-754 `minNum` semantics. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_fmin(x: T, y: T) -> T; +pub const unsafe fn simd_fmin(x: T, y: T) -> T; /// Returns the maximum of two vectors, elementwise. /// @@ -250,7 +250,7 @@ pub unsafe fn simd_fmin(x: T, y: T) -> T; /// Follows IEEE-754 `maxNum` semantics. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_fmax(x: T, y: T) -> T; +pub const unsafe fn simd_fmax(x: T, y: T) -> T; /// Tests elementwise equality of two vectors. /// @@ -261,7 +261,7 @@ pub unsafe fn simd_fmax(x: T, y: T) -> T; /// Returns `0` for false and `!0` for true. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_eq(x: T, y: T) -> U; +pub const unsafe fn simd_eq(x: T, y: T) -> U; /// Tests elementwise inequality equality of two vectors. /// @@ -272,7 +272,7 @@ pub unsafe fn simd_eq(x: T, y: T) -> U; /// Returns `0` for false and `!0` for true. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_ne(x: T, y: T) -> U; +pub const unsafe fn simd_ne(x: T, y: T) -> U; /// Tests if `x` is less than `y`, elementwise. 
/// @@ -283,7 +283,7 @@ pub unsafe fn simd_ne(x: T, y: T) -> U; /// Returns `0` for false and `!0` for true. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_lt(x: T, y: T) -> U; +pub const unsafe fn simd_lt(x: T, y: T) -> U; /// Tests if `x` is less than or equal to `y`, elementwise. /// @@ -294,7 +294,7 @@ pub unsafe fn simd_lt(x: T, y: T) -> U; /// Returns `0` for false and `!0` for true. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_le(x: T, y: T) -> U; +pub const unsafe fn simd_le(x: T, y: T) -> U; /// Tests if `x` is greater than `y`, elementwise. /// @@ -305,7 +305,7 @@ pub unsafe fn simd_le(x: T, y: T) -> U; /// Returns `0` for false and `!0` for true. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_gt(x: T, y: T) -> U; +pub const unsafe fn simd_gt(x: T, y: T) -> U; /// Tests if `x` is greater than or equal to `y`, elementwise. /// @@ -316,7 +316,7 @@ pub unsafe fn simd_gt(x: T, y: T) -> U; /// Returns `0` for false and `!0` for true. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_ge(x: T, y: T) -> U; +pub const unsafe fn simd_ge(x: T, y: T) -> U; /// Shuffles two vectors by const indices. /// @@ -332,7 +332,7 @@ pub unsafe fn simd_ge(x: T, y: T) -> U; /// of `xy`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_shuffle(x: T, y: T, idx: U) -> V; +pub const unsafe fn simd_shuffle(x: T, y: T, idx: U) -> V; /// Reads a vector of pointers. /// @@ -353,7 +353,7 @@ pub unsafe fn simd_shuffle(x: T, y: T, idx: U) -> V; /// `mask` must only contain `0` or `!0` values. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_gather(val: T, ptr: U, mask: V) -> T; +pub const unsafe fn simd_gather(val: T, ptr: U, mask: V) -> T; /// Writes to a vector of pointers. /// @@ -377,7 +377,7 @@ pub unsafe fn simd_gather(val: T, ptr: U, mask: V) -> T; /// `mask` must only contain `0` or `!0` values. 
#[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_scatter(val: T, ptr: U, mask: V); +pub const unsafe fn simd_scatter(val: T, ptr: U, mask: V); /// A type for alignment options for SIMD masked load/store intrinsics. #[derive(Debug, ConstParamTy, PartialEq, Eq)] @@ -412,7 +412,8 @@ pub enum SimdAlign { /// `mask` must only contain `0` or `!0` values. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_masked_load(mask: V, ptr: U, val: T) -> T; +pub const unsafe fn simd_masked_load(mask: V, ptr: U, val: T) +-> T; /// Writes to a vector of pointers. /// @@ -433,14 +434,14 @@ pub unsafe fn simd_masked_load(mask: V, ptr: U, /// `mask` must only contain `0` or `!0` values. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_masked_store(mask: V, ptr: U, val: T); +pub const unsafe fn simd_masked_store(mask: V, ptr: U, val: T); /// Adds two simd vectors elementwise, with saturation. /// /// `T` must be a vector of integer primitive types. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_saturating_add(x: T, y: T) -> T; +pub const unsafe fn simd_saturating_add(x: T, y: T) -> T; /// Subtracts two simd vectors elementwise, with saturation. /// @@ -449,7 +450,7 @@ pub unsafe fn simd_saturating_add(x: T, y: T) -> T; /// Subtract `rhs` from `lhs`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_saturating_sub(lhs: T, rhs: T) -> T; +pub const unsafe fn simd_saturating_sub(lhs: T, rhs: T) -> T; /// Adds elements within a vector from left to right. /// @@ -460,7 +461,7 @@ pub unsafe fn simd_saturating_sub(lhs: T, rhs: T) -> T; /// Starting with the value `y`, add the elements of `x` and accumulate. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_add_ordered(x: T, y: U) -> U; +pub const unsafe fn simd_reduce_add_ordered(x: T, y: U) -> U; /// Adds elements within a vector in arbitrary order. May also be re-associated with /// unordered additions on the inputs/outputs. 
@@ -481,7 +482,7 @@ pub unsafe fn simd_reduce_add_unordered(x: T) -> U; /// Starting with the value `y`, multiply the elements of `x` and accumulate. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_mul_ordered(x: T, y: U) -> U; +pub const unsafe fn simd_reduce_mul_ordered(x: T, y: U) -> U; /// Multiplies elements within a vector in arbitrary order. May also be re-associated with /// unordered additions on the inputs/outputs. @@ -501,7 +502,7 @@ pub unsafe fn simd_reduce_mul_unordered(x: T) -> U; /// `x` must contain only `0` or `!0`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_all(x: T) -> bool; +pub const unsafe fn simd_reduce_all(x: T) -> bool; /// Checks if any mask value is true. /// @@ -511,7 +512,7 @@ pub unsafe fn simd_reduce_all(x: T) -> bool; /// `x` must contain only `0` or `!0`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_any(x: T) -> bool; +pub const unsafe fn simd_reduce_any(x: T) -> bool; /// Returns the maximum element of a vector. /// @@ -522,7 +523,7 @@ pub unsafe fn simd_reduce_any(x: T) -> bool; /// For floating-point values, uses IEEE-754 `maxNum`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_max(x: T) -> U; +pub const unsafe fn simd_reduce_max(x: T) -> U; /// Returns the minimum element of a vector. /// @@ -533,7 +534,7 @@ pub unsafe fn simd_reduce_max(x: T) -> U; /// For floating-point values, uses IEEE-754 `minNum`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_min(x: T) -> U; +pub const unsafe fn simd_reduce_min(x: T) -> U; /// Logical "and"s all elements together. /// @@ -542,7 +543,7 @@ pub unsafe fn simd_reduce_min(x: T) -> U; /// `U` must be the element type of `T`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_and(x: T) -> U; +pub const unsafe fn simd_reduce_and(x: T) -> U; /// Logical "ors" all elements together. /// @@ -551,7 +552,7 @@ pub unsafe fn simd_reduce_and(x: T) -> U; /// `U` must be the element type of `T`. 
#[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_or(x: T) -> U; +pub const unsafe fn simd_reduce_or(x: T) -> U; /// Logical "exclusive ors" all elements together. /// @@ -560,7 +561,7 @@ pub unsafe fn simd_reduce_or(x: T) -> U; /// `U` must be the element type of `T`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_xor(x: T) -> U; +pub const unsafe fn simd_reduce_xor(x: T) -> U; /// Truncates an integer vector to a bitmask. /// @@ -597,7 +598,7 @@ pub unsafe fn simd_reduce_xor(x: T) -> U; /// `x` must contain only `0` and `!0`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_bitmask(x: T) -> U; +pub const unsafe fn simd_bitmask(x: T) -> U; /// Selects elements from a mask. /// @@ -613,7 +614,7 @@ pub unsafe fn simd_bitmask(x: T) -> U; /// `mask` must only contain `0` and `!0`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_select(mask: M, if_true: T, if_false: T) -> T; +pub const unsafe fn simd_select(mask: M, if_true: T, if_false: T) -> T; /// Selects elements from a bitmask. /// @@ -629,7 +630,7 @@ pub unsafe fn simd_select(mask: M, if_true: T, if_false: T) -> T; /// The bitmask bit order matches `simd_bitmask`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_select_bitmask(m: M, yes: T, no: T) -> T; +pub const unsafe fn simd_select_bitmask(m: M, yes: T, no: T) -> T; /// Calculates the offset from a pointer vector elementwise, potentially /// wrapping. @@ -641,14 +642,14 @@ pub unsafe fn simd_select_bitmask(m: M, yes: T, no: T) -> T; /// Operates as if by `::wrapping_offset`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_arith_offset(ptr: T, offset: U) -> T; +pub const unsafe fn simd_arith_offset(ptr: T, offset: U) -> T; /// Casts a vector of pointers. /// /// `T` and `U` must be vectors of pointers with the same number of elements. 
#[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_cast_ptr(ptr: T) -> U; +pub const unsafe fn simd_cast_ptr(ptr: T) -> U; /// Exposes a vector of pointers as a vector of addresses. /// @@ -666,56 +667,56 @@ pub unsafe fn simd_expose_provenance(ptr: T) -> U; /// `U` must be a vector of pointers, with the same length as `T`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_with_exposed_provenance(addr: T) -> U; +pub const unsafe fn simd_with_exposed_provenance(addr: T) -> U; /// Swaps bytes of each element. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_bswap(x: T) -> T; +pub const unsafe fn simd_bswap(x: T) -> T; /// Reverses bits of each element. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_bitreverse(x: T) -> T; +pub const unsafe fn simd_bitreverse(x: T) -> T; /// Counts the leading zeros of each element. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_ctlz(x: T) -> T; +pub const unsafe fn simd_ctlz(x: T) -> T; /// Counts the number of ones in each element. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_ctpop(x: T) -> T; +pub const unsafe fn simd_ctpop(x: T) -> T; /// Counts the trailing zeros of each element. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_cttz(x: T) -> T; +pub const unsafe fn simd_cttz(x: T) -> T; /// Rounds up each element to the next highest integer-valued float. /// /// `T` must be a vector of floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_ceil(x: T) -> T; +pub const unsafe fn simd_ceil(x: T) -> T; /// Rounds down each element to the next lowest integer-valued float. /// /// `T` must be a vector of floats. 
#[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_floor(x: T) -> T; +pub const unsafe fn simd_floor(x: T) -> T; /// Rounds each element to the closest integer-valued float. /// Ties are resolved by rounding away from 0. @@ -723,7 +724,7 @@ pub unsafe fn simd_floor(x: T) -> T; /// `T` must be a vector of floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_round(x: T) -> T; +pub const unsafe fn simd_round(x: T) -> T; /// Rounds each element to the closest integer-valued float. /// Ties are resolved by rounding to the number with an even least significant digit @@ -731,7 +732,7 @@ pub unsafe fn simd_round(x: T) -> T; /// `T` must be a vector of floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_round_ties_even(x: T) -> T; +pub const unsafe fn simd_round_ties_even(x: T) -> T; /// Returns the integer part of each element as an integer-valued float. /// In other words, non-integer values are truncated towards zero. @@ -739,7 +740,7 @@ pub unsafe fn simd_round_ties_even(x: T) -> T; /// `T` must be a vector of floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_trunc(x: T) -> T; +pub const unsafe fn simd_trunc(x: T) -> T; /// Takes the square root of each element. /// @@ -753,7 +754,7 @@ pub unsafe fn simd_fsqrt(x: T) -> T; /// `T` must be a vector of floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_fma(x: T, y: T, z: T) -> T; +pub const unsafe fn simd_fma(x: T, y: T, z: T) -> T; /// Computes `(x*y) + z` for each element, non-deterministically executing either /// a fused multiply-add or two operations with rounding of the intermediate result. @@ -768,7 +769,7 @@ pub unsafe fn simd_fma(x: T, y: T, z: T) -> T; /// `T` must be a vector of floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_relaxed_fma(x: T, y: T, z: T) -> T; +pub const unsafe fn simd_relaxed_fma(x: T, y: T, z: T) -> T; // Computes the sine of each element. 
/// diff --git a/src/tools/miri/tests/pass/intrinsics/portable-simd.rs b/src/tools/miri/tests/pass/intrinsics/portable-simd.rs index 961a4b82a7e9b..56c000633e586 100644 --- a/src/tools/miri/tests/pass/intrinsics/portable-simd.rs +++ b/src/tools/miri/tests/pass/intrinsics/portable-simd.rs @@ -60,7 +60,7 @@ impl PackedSimd { #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_shuffle_const_generic(x: T, y: T) -> U; +pub const unsafe fn simd_shuffle_const_generic(x: T, y: T) -> U; fn simd_ops_f16() { use intrinsics::*; diff --git a/tests/auxiliary/minisimd.rs b/tests/auxiliary/minisimd.rs index ff0c996de1c87..38e2621698dbf 100644 --- a/tests/auxiliary/minisimd.rs +++ b/tests/auxiliary/minisimd.rs @@ -10,6 +10,10 @@ #![allow(unused)] #![allow(non_camel_case_types)] +// FIXME: `cfg(minisimd_const)` is used to toggle use of const trait impls, which require a few +// nightly features. Remove this when `const_trait_impls`, `const_cmp` and `const_index` are +// stabilized. +#![allow(unexpected_cfgs)] // The field is currently left `pub` for convenience in porting tests, many of // which attempt to just construct it directly. 
That still works; it's just the @@ -24,39 +28,32 @@ impl Clone for Simd { } } -impl PartialEq for Simd { - fn eq(&self, other: &Self) -> bool { - self.as_array() == other.as_array() - } -} - impl core::fmt::Debug for Simd { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { <[T; N] as core::fmt::Debug>::fmt(self.as_array(), f) } } -impl core::ops::Index for Simd { - type Output = T; - fn index(&self, i: usize) -> &T { - &self.as_array()[i] - } -} - impl Simd { pub const fn from_array(a: [T; N]) -> Self { Simd(a) } - pub fn as_array(&self) -> &[T; N] { + pub const fn as_array(&self) -> &[T; N] { let p: *const Self = self; unsafe { &*p.cast::<[T; N]>() } } - pub fn into_array(self) -> [T; N] + pub const fn into_array(self) -> [T; N] where T: Copy, { *self.as_array() } + pub const fn splat(a: T) -> Self + where + T: Copy, + { + Self([a; N]) + } } pub type u8x2 = Simd; @@ -109,6 +106,14 @@ pub type i64x8 = Simd; pub type i128x2 = Simd; pub type i128x4 = Simd; +pub type usizex2 = Simd; +pub type usizex4 = Simd; +pub type usizex8 = Simd; + +pub type isizex2 = Simd; +pub type isizex4 = Simd; +pub type isizex8 = Simd; + pub type f32x2 = Simd; pub type f32x4 = Simd; pub type f32x8 = Simd; @@ -122,7 +127,7 @@ pub type f64x8 = Simd; // which attempt to just construct it directly. That still works; it's just the // `.0` projection that doesn't. 
#[repr(simd, packed)] -#[derive(Copy)] +#[derive(Copy, Eq)] pub struct PackedSimd(pub [T; N]); impl Clone for PackedSimd { @@ -131,12 +136,6 @@ impl Clone for PackedSimd { } } -impl PartialEq for PackedSimd { - fn eq(&self, other: &Self) -> bool { - self.as_array() == other.as_array() - } -} - impl core::fmt::Debug for PackedSimd { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { <[T; N] as core::fmt::Debug>::fmt(self.as_array(), f) @@ -147,14 +146,81 @@ impl PackedSimd { pub const fn from_array(a: [T; N]) -> Self { PackedSimd(a) } - pub fn as_array(&self) -> &[T; N] { + pub const fn as_array(&self) -> &[T; N] { let p: *const Self = self; unsafe { &*p.cast::<[T; N]>() } } - pub fn into_array(self) -> [T; N] + pub const fn into_array(self) -> [T; N] where T: Copy, { *self.as_array() } + pub const fn splat(a: T) -> Self + where + T: Copy, + { + Self([a; N]) + } +} + +// As `const_trait_impl` is a language feature with specialized syntax, we have to use them in a way +// such that it doesn't get parsed as Rust code unless `cfg(minisimd_const)` is on. The easiest way +// for that is a macro + +macro_rules! impl_traits { + ($($const_:ident)?) => { + impl $($const_)? PartialEq for Simd { + fn eq(&self, other: &Self) -> bool { + self.as_array() == other.as_array() + } + } + + impl $($const_)? core::ops::Index for Simd { + type Output = T; + fn index(&self, i: usize) -> &T { + &self.as_array()[i] + } + } + + impl $($const_)? PartialEq for PackedSimd + { + fn eq(&self, other: &Self) -> bool { + self.as_array() == other.as_array() + } + } + }; } + +#[cfg(minisimd_const)] +impl_traits!(const); + +#[cfg(not(minisimd_const))] +impl_traits!(); + +/// Version of `assert_eq` that ignores fancy runtime printing in const context. +/// FIXME: Remove once is fixed. +#[cfg(minisimd_const)] +#[macro_export] +macro_rules! assert_eq { + ($left:expr, $right:expr $(,)?) 
=> { + assert_eq!( + $left, + $right, + concat!("`", stringify!($left), "` == `", stringify!($right), "`") + ); + }; + ($left:expr, $right:expr$(, $($arg:tt)+)?) => { + { + let left = $left; + let right = $right; + // type inference works better with the concrete type on the + // left, but humans work better with the expected on the + // right + assert!(right == left, $($($arg)*),*); + } + }; +} + +#[cfg(minisimd_const)] +use assert_eq; diff --git a/tests/ui/simd/intrinsic/float-math-pass.rs b/tests/ui/simd/intrinsic/float-math-pass.rs index 743aae8d1c319..8797a8a9dca49 100644 --- a/tests/ui/simd/intrinsic/float-math-pass.rs +++ b/tests/ui/simd/intrinsic/float-math-pass.rs @@ -1,6 +1,7 @@ //@ run-pass //@ ignore-emscripten //@ ignore-android +//@ compile-flags: --cfg minisimd_const // FIXME: this test fails on arm-android because the NDK version 14 is too old. // It needs at least version 18. We disable it on all android build bots because @@ -8,7 +9,7 @@ // Test that the simd floating-point math intrinsics produce correct results. -#![feature(repr_simd, intrinsics, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #![allow(non_camel_case_types)] #[path = "../../../auxiliary/minisimd.rs"] @@ -20,7 +21,10 @@ use std::intrinsics::simd::*; macro_rules! assert_approx_eq_f32 { ($a:expr, $b:expr) => {{ let (a, b) = (&$a, &$b); - assert!((*a - *b).abs() < 1.0e-6, "{} is not approximately equal to {}", *a, *b); + assert!( + (*a - *b).abs() < 1.0e-6, + concat!(stringify!($a), " is not approximately equal to ", stringify!($b)) + ); }}; } macro_rules! assert_approx_eq { @@ -34,7 +38,7 @@ macro_rules! 
assert_approx_eq { }}; } -fn main() { +const fn simple_math() { let x = f32x4::from_array([1.0, 1.0, 1.0, 1.0]); let y = f32x4::from_array([-1.0, -1.0, -1.0, -1.0]); let z = f32x4::from_array([0.0, 0.0, 0.0, 0.0]); @@ -43,21 +47,44 @@ fn main() { unsafe { let r = simd_fabs(y); - assert_approx_eq!(x, r); + assert_eq!(x, r); - let r = simd_fcos(z); + // rounding functions + let r = simd_floor(h); + assert_eq!(z, r); + + let r = simd_ceil(h); + assert_eq!(x, r); + + let r = simd_round(h); + assert_eq!(x, r); + + let r = simd_round_ties_even(h); + assert_eq!(z, r); + + let r = simd_trunc(h); + assert_eq!(z, r); + + let r = simd_fma(x, h, h); assert_approx_eq!(x, r); - let r = simd_fexp(z); + let r = simd_relaxed_fma(x, h, h); assert_approx_eq!(x, r); + } +} - let r = simd_fexp2(z); +fn special_math() { + let x = f32x4::from_array([1.0, 1.0, 1.0, 1.0]); + let z = f32x4::from_array([0.0, 0.0, 0.0, 0.0]); + + unsafe { + let r = simd_fcos(z); assert_approx_eq!(x, r); - let r = simd_fma(x, h, h); + let r = simd_fexp(z); assert_approx_eq!(x, r); - let r = simd_relaxed_fma(x, h, h); + let r = simd_fexp2(z); assert_approx_eq!(x, r); let r = simd_fsqrt(x); @@ -74,21 +101,11 @@ fn main() { let r = simd_fsin(z); assert_approx_eq!(z, r); - - // rounding functions - let r = simd_floor(h); - assert_eq!(z, r); - - let r = simd_ceil(h); - assert_eq!(x, r); - - let r = simd_round(h); - assert_eq!(x, r); - - let r = simd_round_ties_even(h); - assert_eq!(z, r); - - let r = simd_trunc(h); - assert_eq!(z, r); } } + +fn main() { + const { simple_math() }; + simple_math(); + special_math(); +} diff --git a/tests/ui/simd/intrinsic/float-minmax-pass.rs b/tests/ui/simd/intrinsic/float-minmax-pass.rs index 12210ba0ad120..4b6a35556ed57 100644 --- a/tests/ui/simd/intrinsic/float-minmax-pass.rs +++ b/tests/ui/simd/intrinsic/float-minmax-pass.rs @@ -1,9 +1,10 @@ //@ run-pass //@ ignore-emscripten +//@ compile-flags: --cfg minisimd_const // Test that the simd_f{min,max} intrinsics produce the correct 
results. -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #![allow(non_camel_case_types)] #[path = "../../../auxiliary/minisimd.rs"] @@ -12,7 +13,7 @@ use minisimd::*; use std::intrinsics::simd::*; -fn main() { +const fn minmax() { let x = f32x4::from_array([1.0, 2.0, 3.0, 4.0]); let y = f32x4::from_array([2.0, 1.0, 4.0, 3.0]); @@ -47,3 +48,8 @@ fn main() { assert_eq!(maxn, y); } } + +fn main() { + const { minmax() }; + minmax(); +} diff --git a/tests/ui/simd/intrinsic/generic-arithmetic-pass.rs b/tests/ui/simd/intrinsic/generic-arithmetic-pass.rs index 09f5d41a87c13..62782db37ed4c 100644 --- a/tests/ui/simd/intrinsic/generic-arithmetic-pass.rs +++ b/tests/ui/simd/intrinsic/generic-arithmetic-pass.rs @@ -1,8 +1,9 @@ //@ run-pass //@ ignore-backends: gcc +//@ compile-flags: --cfg minisimd_const #![allow(non_camel_case_types)] -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -20,7 +21,7 @@ macro_rules! 
all_eq { use std::intrinsics::simd::*; -fn main() { +const fn arithmetic() { let x1 = i32x4::from_array([1, 2, 3, 4]); let y1 = U32::<4>::from_array([1, 2, 3, 4]); let z1 = f32x4::from_array([1.0, 2.0, 3.0, 4.0]); @@ -224,3 +225,8 @@ fn main() { all_eq!(simd_cttz(y1), U32::<4>::from_array([0, 1, 0, 2])); } } + +fn main() { + const { arithmetic() }; + arithmetic(); +} diff --git a/tests/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs b/tests/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs index a997f12370347..f139bf3220102 100644 --- a/tests/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs +++ b/tests/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs @@ -1,8 +1,9 @@ //@ run-pass //@ ignore-emscripten +//@ compile-flags: --cfg minisimd_const #![allow(non_camel_case_types)] -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -12,7 +13,7 @@ use std::intrinsics::simd::{simd_saturating_add, simd_saturating_sub}; type I32 = Simd; -fn main() { +const fn saturating() { // unsigned { const M: u32 = u32::MAX; @@ -84,3 +85,8 @@ fn main() { } } } + +fn main() { + const { saturating() }; + saturating(); +} diff --git a/tests/ui/simd/intrinsic/generic-as.rs b/tests/ui/simd/intrinsic/generic-as.rs index bba712e62966a..b81402e9dca4d 100644 --- a/tests/ui/simd/intrinsic/generic-as.rs +++ b/tests/ui/simd/intrinsic/generic-as.rs @@ -1,7 +1,8 @@ //@ run-pass //@ ignore-backends: gcc +//@ compile-flags: --cfg minisimd_const -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -11,7 +12,7 @@ use std::intrinsics::simd::simd_as; type V = Simd; -fn main() { +const fn as_simd() { unsafe { let u: V:: = Simd([u32::MIN, u32::MAX]); let i: V = simd_as(u); @@ -47,3 +48,8 @@ fn main() { assert_eq!(u[1], f[1] as 
usize); } } + +fn main() { + const { as_simd() }; + as_simd(); +} diff --git a/tests/ui/simd/intrinsic/generic-bitmask-pass.rs b/tests/ui/simd/intrinsic/generic-bitmask-pass.rs index cb3221e21d530..afe96de63becf 100644 --- a/tests/ui/simd/intrinsic/generic-bitmask-pass.rs +++ b/tests/ui/simd/intrinsic/generic-bitmask-pass.rs @@ -1,41 +1,31 @@ //@ run-pass -#![allow(non_camel_case_types)] //@ ignore-emscripten //@ ignore-endian-big behavior of simd_bitmask is endian-specific +//@ compile-flags: --cfg minisimd_const // Test that the simd_bitmask intrinsic produces correct results. -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] -use std::intrinsics::simd::simd_bitmask; - -#[repr(simd)] -#[derive(Copy, Clone, PartialEq, Debug)] -struct u32x4(pub [u32; 4]); - -#[repr(simd)] -#[derive(Copy, Clone, PartialEq, Debug)] -struct u8x4(pub [u8; 4]); +#[path = "../../../auxiliary/minisimd.rs"] +mod minisimd; +use minisimd::*; -#[repr(simd)] -#[derive(Copy, Clone, PartialEq, Debug)] -struct Tx4(pub [T; 4]); +use std::intrinsics::simd::simd_bitmask; -fn main() { - let z = u32x4([0, 0, 0, 0]); +const fn bitmask() { + let z = u32x4::from_array([0, 0, 0, 0]); let ez = 0_u8; - let o = u32x4([!0, !0, !0, !0]); + let o = u32x4::from_array([!0, !0, !0, !0]); let eo = 0b_1111_u8; - let m0 = u32x4([!0, 0, !0, 0]); + let m0 = u32x4::from_array([!0, 0, !0, 0]); let e0 = 0b_0000_0101_u8; - // Check that the MSB is extracted: - let m = u8x4([0b_1000_0000, 0b_0100_0001, 0b_1100_0001, 0b_1111_1111]); let e = 0b_1101; // Check usize / isize - let msize: Tx4 = Tx4([usize::MAX, 0, usize::MAX, usize::MAX]); + let msize = usizex4::from_array([usize::MAX, 0, usize::MAX, usize::MAX]); unsafe { let r: u8 = simd_bitmask(z); @@ -47,10 +37,12 @@ fn main() { let r: u8 = simd_bitmask(m0); assert_eq!(r, e0); - let r: u8 = simd_bitmask(m); - assert_eq!(r, e); - let r: u8 = simd_bitmask(msize); assert_eq!(r, e); } } + +fn main() { + 
const { bitmask() }; + bitmask(); +} diff --git a/tests/ui/simd/intrinsic/generic-bswap-byte.rs b/tests/ui/simd/intrinsic/generic-bswap-byte.rs index d30a560b1c2ed..52015c552abaa 100644 --- a/tests/ui/simd/intrinsic/generic-bswap-byte.rs +++ b/tests/ui/simd/intrinsic/generic-bswap-byte.rs @@ -1,6 +1,6 @@ //@ run-pass -#![feature(repr_simd, core_intrinsics)] -#![allow(non_camel_case_types)] +//@ compile-flags: --cfg minisimd_const +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -8,9 +8,14 @@ use minisimd::*; use std::intrinsics::simd::simd_bswap; -fn main() { +const fn bswap() { unsafe { assert_eq!(simd_bswap(i8x4::from_array([0, 1, 2, 3])).into_array(), [0, 1, 2, 3]); assert_eq!(simd_bswap(u8x4::from_array([0, 1, 2, 3])).into_array(), [0, 1, 2, 3]); } } + +fn main() { + const { bswap() }; + bswap(); +} diff --git a/tests/ui/simd/intrinsic/generic-cast-pass.rs b/tests/ui/simd/intrinsic/generic-cast-pass.rs index 0c3b00d65bf5c..9aadb5d0008a0 100644 --- a/tests/ui/simd/intrinsic/generic-cast-pass.rs +++ b/tests/ui/simd/intrinsic/generic-cast-pass.rs @@ -1,6 +1,7 @@ //@ run-pass +//@ compile-flags: --cfg minisimd_const -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -12,7 +13,7 @@ use std::cmp::{max, min}; type V = Simd; -fn main() { +const fn cast() { unsafe { let u: V:: = Simd([i16::MIN as u32, i16::MAX as u32]); let i: V = simd_cast(u); @@ -56,3 +57,8 @@ fn main() { assert_eq!(u[1], f[1] as usize); } } + +fn main() { + const { cast() }; + cast(); +} diff --git a/tests/ui/simd/intrinsic/generic-cast-pointer-width.rs b/tests/ui/simd/intrinsic/generic-cast-pointer-width.rs index 594d1d25d165c..7e50fec565651 100644 --- a/tests/ui/simd/intrinsic/generic-cast-pointer-width.rs +++ b/tests/ui/simd/intrinsic/generic-cast-pointer-width.rs @@ -1,5 
+1,6 @@ //@ run-pass -#![feature(repr_simd, core_intrinsics)] +//@ compile-flags: --cfg minisimd_const +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -9,17 +10,16 @@ use std::intrinsics::simd::simd_cast; type V = Simd; -fn main() { +const fn cast_ptr_width() { let u: V:: = Simd([0, 1, 2, 3]); let uu32: V = unsafe { simd_cast(u) }; let ui64: V = unsafe { simd_cast(u) }; - for (u, (uu32, ui64)) in u - .as_array() - .iter() - .zip(uu32.as_array().iter().zip(ui64.as_array().iter())) - { - assert_eq!(*u as u32, *uu32); - assert_eq!(*u as i64, *ui64); - } + assert_eq!(uu32, V::::from_array([0, 1, 2, 3])); + assert_eq!(ui64, V::::from_array([0, 1, 2, 3])); +} + +fn main() { + const { cast_ptr_width() }; + cast_ptr_width(); } diff --git a/tests/ui/simd/intrinsic/generic-comparison-pass.rs b/tests/ui/simd/intrinsic/generic-comparison-pass.rs index 3e803e8f60327..a4d19faeeeedd 100644 --- a/tests/ui/simd/intrinsic/generic-comparison-pass.rs +++ b/tests/ui/simd/intrinsic/generic-comparison-pass.rs @@ -1,7 +1,14 @@ //@ run-pass +//@ compile-flags: --cfg minisimd_const -#![feature(repr_simd, core_intrinsics, macro_metavar_expr_concat)] -#![allow(non_camel_case_types)] +#![feature( + repr_simd, + core_intrinsics, + const_trait_impl, + const_cmp, + const_index, + macro_metavar_expr_concat +)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -25,27 +32,26 @@ macro_rules! cmp { macro_rules! 
tests { ($($lhs: ident, $rhs: ident;)*) => {{ $( - (|| { - cmp!(eq($lhs, $rhs)); - cmp!(ne($lhs, $rhs)); + cmp!(eq($lhs, $rhs)); + cmp!(ne($lhs, $rhs)); - // test both directions - cmp!(lt($lhs, $rhs)); - cmp!(lt($rhs, $lhs)); + // test both directions + cmp!(lt($lhs, $rhs)); + cmp!(lt($rhs, $lhs)); - cmp!(le($lhs, $rhs)); - cmp!(le($rhs, $lhs)); + cmp!(le($lhs, $rhs)); + cmp!(le($rhs, $lhs)); - cmp!(gt($lhs, $rhs)); - cmp!(gt($rhs, $lhs)); + cmp!(gt($lhs, $rhs)); + cmp!(gt($rhs, $lhs)); - cmp!(ge($lhs, $rhs)); - cmp!(ge($rhs, $lhs)); - })(); - )* + cmp!(ge($lhs, $rhs)); + cmp!(ge($rhs, $lhs)); + )* }} } -fn main() { + +const fn compare() { // 13 vs. -100 tests that we get signed vs. unsigned comparisons // correct (i32: 13 > -100, u32: 13 < -100). let i1 = i32x4(10, -11, 12, 13); let i1 = i32x4::from_array([10, -11, 12, 13]); @@ -89,3 +95,8 @@ fn main() { } } } + +fn main() { + const { compare() }; + compare(); +} diff --git a/tests/ui/simd/intrinsic/generic-elements-pass.rs b/tests/ui/simd/intrinsic/generic-elements-pass.rs index f441d992e11b7..680e0dcfd7d6a 100644 --- a/tests/ui/simd/intrinsic/generic-elements-pass.rs +++ b/tests/ui/simd/intrinsic/generic-elements-pass.rs @@ -1,6 +1,7 @@ //@ run-pass +//@ compile-flags: --cfg minisimd_const -#![feature(repr_simd, intrinsics, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -20,49 +21,15 @@ macro_rules! all_eq { // type inference works better with the concrete type on the // left, but humans work better with the expected on the // right. 
- assert!(b == a, "{:?} != {:?}", a, b); + assert!(b == a, concat!(stringify!($a), "!=", stringify!($b))); }}; } -fn main() { +fn extract_insert_dyn() { let x2 = i32x2::from_array([20, 21]); let x4 = i32x4::from_array([40, 41, 42, 43]); let x8 = i32x8::from_array([80, 81, 82, 83, 84, 85, 86, 87]); - unsafe { - all_eq!(simd_insert(x2, 0, 100), i32x2::from_array([100, 21])); - all_eq!(simd_insert(x2, 1, 100), i32x2::from_array([20, 100])); - - all_eq!(simd_insert(x4, 0, 100), i32x4::from_array([100, 41, 42, 43])); - all_eq!(simd_insert(x4, 1, 100), i32x4::from_array([40, 100, 42, 43])); - all_eq!(simd_insert(x4, 2, 100), i32x4::from_array([40, 41, 100, 43])); - all_eq!(simd_insert(x4, 3, 100), i32x4::from_array([40, 41, 42, 100])); - - all_eq!(simd_insert(x8, 0, 100), i32x8::from_array([100, 81, 82, 83, 84, 85, 86, 87])); - all_eq!(simd_insert(x8, 1, 100), i32x8::from_array([80, 100, 82, 83, 84, 85, 86, 87])); - all_eq!(simd_insert(x8, 2, 100), i32x8::from_array([80, 81, 100, 83, 84, 85, 86, 87])); - all_eq!(simd_insert(x8, 3, 100), i32x8::from_array([80, 81, 82, 100, 84, 85, 86, 87])); - all_eq!(simd_insert(x8, 4, 100), i32x8::from_array([80, 81, 82, 83, 100, 85, 86, 87])); - all_eq!(simd_insert(x8, 5, 100), i32x8::from_array([80, 81, 82, 83, 84, 100, 86, 87])); - all_eq!(simd_insert(x8, 6, 100), i32x8::from_array([80, 81, 82, 83, 84, 85, 100, 87])); - all_eq!(simd_insert(x8, 7, 100), i32x8::from_array([80, 81, 82, 83, 84, 85, 86, 100])); - - all_eq!(simd_extract(x2, 0), 20); - all_eq!(simd_extract(x2, 1), 21); - - all_eq!(simd_extract(x4, 0), 40); - all_eq!(simd_extract(x4, 1), 41); - all_eq!(simd_extract(x4, 2), 42); - all_eq!(simd_extract(x4, 3), 43); - all_eq!(simd_extract(x8, 0), 80); - all_eq!(simd_extract(x8, 1), 81); - all_eq!(simd_extract(x8, 2), 82); - all_eq!(simd_extract(x8, 3), 83); - all_eq!(simd_extract(x8, 4), 84); - all_eq!(simd_extract(x8, 5), 85); - all_eq!(simd_extract(x8, 6), 86); - all_eq!(simd_extract(x8, 7), 87); - } unsafe { 
all_eq!(simd_insert_dyn(x2, 0, 100), i32x2::from_array([100, 21])); all_eq!(simd_insert_dyn(x2, 1, 100), i32x2::from_array([20, 100])); @@ -98,48 +65,83 @@ fn main() { all_eq!(simd_extract_dyn(x8, 6), 86); all_eq!(simd_extract_dyn(x8, 7), 87); } +} + +macro_rules! simd_shuffle { + ($a:expr, $b:expr, $swizzle:expr) => { + simd_shuffle($a, $b, const { SimdShuffleIdx($swizzle) }) + }; +} + +const fn swizzle() { + let x2 = i32x2::from_array([20, 21]); + let x4 = i32x4::from_array([40, 41, 42, 43]); + let x8 = i32x8::from_array([80, 81, 82, 83, 84, 85, 86, 87]); + unsafe { + all_eq!(simd_insert(x2, 0, 100), i32x2::from_array([100, 21])); + all_eq!(simd_insert(x2, 1, 100), i32x2::from_array([20, 100])); + + all_eq!(simd_insert(x4, 0, 100), i32x4::from_array([100, 41, 42, 43])); + all_eq!(simd_insert(x4, 1, 100), i32x4::from_array([40, 100, 42, 43])); + all_eq!(simd_insert(x4, 2, 100), i32x4::from_array([40, 41, 100, 43])); + all_eq!(simd_insert(x4, 3, 100), i32x4::from_array([40, 41, 42, 100])); + + all_eq!(simd_insert(x8, 0, 100), i32x8::from_array([100, 81, 82, 83, 84, 85, 86, 87])); + all_eq!(simd_insert(x8, 1, 100), i32x8::from_array([80, 100, 82, 83, 84, 85, 86, 87])); + all_eq!(simd_insert(x8, 2, 100), i32x8::from_array([80, 81, 100, 83, 84, 85, 86, 87])); + all_eq!(simd_insert(x8, 3, 100), i32x8::from_array([80, 81, 82, 100, 84, 85, 86, 87])); + all_eq!(simd_insert(x8, 4, 100), i32x8::from_array([80, 81, 82, 83, 100, 85, 86, 87])); + all_eq!(simd_insert(x8, 5, 100), i32x8::from_array([80, 81, 82, 83, 84, 100, 86, 87])); + all_eq!(simd_insert(x8, 6, 100), i32x8::from_array([80, 81, 82, 83, 84, 85, 100, 87])); + all_eq!(simd_insert(x8, 7, 100), i32x8::from_array([80, 81, 82, 83, 84, 85, 86, 100])); + + all_eq!(simd_extract(x2, 0), 20); + all_eq!(simd_extract(x2, 1), 21); + + all_eq!(simd_extract(x4, 0), 40); + all_eq!(simd_extract(x4, 1), 41); + all_eq!(simd_extract(x4, 2), 42); + all_eq!(simd_extract(x4, 3), 43); + + all_eq!(simd_extract(x8, 0), 80); + 
all_eq!(simd_extract(x8, 1), 81); + all_eq!(simd_extract(x8, 2), 82); + all_eq!(simd_extract(x8, 3), 83); + all_eq!(simd_extract(x8, 4), 84); + all_eq!(simd_extract(x8, 5), 85); + all_eq!(simd_extract(x8, 6), 86); + all_eq!(simd_extract(x8, 7), 87); + } let y2 = i32x2::from_array([120, 121]); let y4 = i32x4::from_array([140, 141, 142, 143]); let y8 = i32x8::from_array([180, 181, 182, 183, 184, 185, 186, 187]); unsafe { + all_eq!(simd_shuffle!(x2, y2, [3u32, 0]), i32x2::from_array([121, 20])); + all_eq!(simd_shuffle!(x2, y2, [3u32, 0, 1, 2]), i32x4::from_array([121, 20, 21, 120])); all_eq!( - simd_shuffle(x2, y2, const { SimdShuffleIdx([3u32, 0]) }), - i32x2::from_array([121, 20]) - ); - all_eq!( - simd_shuffle(x2, y2, const { SimdShuffleIdx([3u32, 0, 1, 2]) }), - i32x4::from_array([121, 20, 21, 120]) - ); - all_eq!( - simd_shuffle(x2, y2, const { SimdShuffleIdx([3u32, 0, 1, 2, 1, 2, 3, 0]) }), + simd_shuffle!(x2, y2, [3u32, 0, 1, 2, 1, 2, 3, 0]), i32x8::from_array([121, 20, 21, 120, 21, 120, 121, 20]) ); + all_eq!(simd_shuffle!(x4, y4, [7u32, 2]), i32x2::from_array([143, 42])); + all_eq!(simd_shuffle!(x4, y4, [7u32, 2, 5, 0]), i32x4::from_array([143, 42, 141, 40])); all_eq!( - simd_shuffle(x4, y4, const { SimdShuffleIdx([7u32, 2]) }), - i32x2::from_array([143, 42]) - ); - all_eq!( - simd_shuffle(x4, y4, const { SimdShuffleIdx([7u32, 2, 5, 0]) }), - i32x4::from_array([143, 42, 141, 40]) - ); - all_eq!( - simd_shuffle(x4, y4, const { SimdShuffleIdx([7u32, 2, 5, 0, 3, 6, 4, 1]) }), + simd_shuffle!(x4, y4, [7u32, 2, 5, 0, 3, 6, 4, 1]), i32x8::from_array([143, 42, 141, 40, 43, 142, 140, 41]) ); + all_eq!(simd_shuffle!(x8, y8, [11u32, 5]), i32x2::from_array([183, 85])); + all_eq!(simd_shuffle!(x8, y8, [11u32, 5, 15, 0]), i32x4::from_array([183, 85, 187, 80])); all_eq!( - simd_shuffle(x8, y8, const { SimdShuffleIdx([11u32, 5]) }), - i32x2::from_array([183, 85]) - ); - all_eq!( - simd_shuffle(x8, y8, const { SimdShuffleIdx([11u32, 5, 15, 0]) }), - i32x4::from_array([183, 
85, 187, 80]) - ); - all_eq!( - simd_shuffle(x8, y8, const { SimdShuffleIdx([11u32, 5, 15, 0, 3, 8, 12, 1]) }), + simd_shuffle!(x8, y8, [11u32, 5, 15, 0, 3, 8, 12, 1]), i32x8::from_array([183, 85, 187, 80, 83, 180, 184, 81]) ); } } + +fn main() { + extract_insert_dyn(); + const { swizzle() }; + swizzle(); +} diff --git a/tests/ui/simd/intrinsic/generic-gather-scatter-pass.rs b/tests/ui/simd/intrinsic/generic-gather-scatter-pass.rs index c2418c019edaf..96c1040123841 100644 --- a/tests/ui/simd/intrinsic/generic-gather-scatter-pass.rs +++ b/tests/ui/simd/intrinsic/generic-gather-scatter-pass.rs @@ -1,9 +1,10 @@ //@ run-pass //@ ignore-emscripten +//@ compile-flags: --cfg minisimd_const // Test that the simd_{gather,scatter} intrinsics produce the correct results. -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #![allow(non_camel_case_types)] #[path = "../../../auxiliary/minisimd.rs"] @@ -14,48 +15,11 @@ use std::intrinsics::simd::{simd_gather, simd_scatter}; type x4 = Simd; -fn main() { - let mut x = [0_f32, 1., 2., 3., 4., 5., 6., 7.]; - - let default = x4::from_array([-3_f32, -3., -3., -3.]); - let s_strided = x4::from_array([0_f32, 2., -3., 6.]); +fn gather_scatter_of_ptrs() { + // test modifying array of *const f32 + let x = [0_f32, 1., 2., 3., 4., 5., 6., 7.]; let mask = x4::from_array([-1_i32, -1, 0, -1]); - // reading from *const - unsafe { - let pointer = x.as_ptr(); - let pointers = - x4::from_array(std::array::from_fn(|i| pointer.add(i * 2))); - - let r_strided = simd_gather(default, pointers, mask); - - assert_eq!(r_strided, s_strided); - } - - // reading from *mut - unsafe { - let pointer = x.as_mut_ptr(); - let pointers = - x4::from_array(std::array::from_fn(|i| pointer.add(i * 2))); - - let r_strided = simd_gather(default, pointers, mask); - - assert_eq!(r_strided, s_strided); - } - - // writing to *mut - unsafe { - let pointer = x.as_mut_ptr(); - let pointers = - 
x4::from_array(std::array::from_fn(|i| pointer.add(i * 2))); - - let values = x4::from_array([42_f32, 43_f32, 44_f32, 45_f32]); - simd_scatter(values, pointers, mask); - - assert_eq!(x, [42., 1., 43., 3., 4., 5., 45., 7.]); - } - - // test modifying array of *const f32 let mut y = [ &x[0] as *const f32, &x[1] as *const f32, @@ -73,8 +37,7 @@ fn main() { // reading from *const unsafe { let pointer = y.as_ptr(); - let pointers = - x4::from_array(std::array::from_fn(|i| pointer.add(i * 2))); + let pointers = x4::from_array([pointer, pointer.add(2), pointer.add(4), pointer.add(6)]); let r_strided = simd_gather(default, pointers, mask); @@ -84,8 +47,7 @@ fn main() { // reading from *mut unsafe { let pointer = y.as_mut_ptr(); - let pointers = - x4::from_array(std::array::from_fn(|i| pointer.add(i * 2))); + let pointers = x4::from_array([pointer, pointer.add(2), pointer.add(4), pointer.add(6)]); let r_strided = simd_gather(default, pointers, mask); @@ -95,8 +57,7 @@ fn main() { // writing to *mut unsafe { let pointer = y.as_mut_ptr(); - let pointers = - x4::from_array(std::array::from_fn(|i| pointer.add(i * 2))); + let pointers = x4::from_array([pointer, pointer.add(2), pointer.add(4), pointer.add(6)]); let values = x4::from_array([y[7], y[6], y[5], y[1]]); simd_scatter(values, pointers, mask); @@ -114,3 +75,48 @@ fn main() { assert_eq!(y, s); } } + +const fn gather_scatter() { + let mut x = [0_f32, 1., 2., 3., 4., 5., 6., 7.]; + + let default = x4::from_array([-3_f32, -3., -3., -3.]); + let s_strided = x4::from_array([0_f32, 2., -3., 6.]); + let mask = x4::from_array([-1_i32, -1, 0, -1]); + + // reading from *const + unsafe { + let pointer = x.as_ptr(); + let pointers = x4::from_array([pointer, pointer.add(2), pointer.add(4), pointer.add(6)]); + + let r_strided = simd_gather(default, pointers, mask); + + assert_eq!(r_strided, s_strided); + } + + // reading from *mut + unsafe { + let pointer = x.as_mut_ptr(); + let pointers = x4::from_array([pointer, pointer.add(2), 
pointer.add(4), pointer.add(6)]); + + let r_strided = simd_gather(default, pointers, mask); + + assert_eq!(r_strided, s_strided); + } + + // writing to *mut + unsafe { + let pointer = x.as_mut_ptr(); + let pointers = x4::from_array([pointer, pointer.add(2), pointer.add(4), pointer.add(6)]); + + let values = x4::from_array([42_f32, 43_f32, 44_f32, 45_f32]); + simd_scatter(values, pointers, mask); + + assert_eq!(x, [42., 1., 43., 3., 4., 5., 45., 7.]); + } +} + +fn main() { + const { gather_scatter() }; + gather_scatter(); + gather_scatter_of_ptrs(); +} diff --git a/tests/ui/simd/intrinsic/generic-reduction-pass.rs b/tests/ui/simd/intrinsic/generic-reduction-pass.rs index 2d5d75447b661..2c615cd729e7b 100644 --- a/tests/ui/simd/intrinsic/generic-reduction-pass.rs +++ b/tests/ui/simd/intrinsic/generic-reduction-pass.rs @@ -1,35 +1,46 @@ //@ run-pass -#![allow(non_camel_case_types)] //@ ignore-emscripten +//@ compile-flags: --cfg minisimd_const // Test that the simd_reduce_{op} intrinsics produce the correct results. 
-#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] -use std::intrinsics::simd::*; - -#[repr(simd)] -#[derive(Copy, Clone)] -struct i32x4(pub [i32; 4]); - -#[repr(simd)] -#[derive(Copy, Clone)] -struct u32x4(pub [u32; 4]); +#[path = "../../../auxiliary/minisimd.rs"] +mod minisimd; +use minisimd::*; -#[repr(simd)] -#[derive(Copy, Clone)] -struct f32x4(pub [f32; 4]); +use std::intrinsics::simd::*; -#[repr(simd)] -#[derive(Copy, Clone)] -struct b8x4(pub [i8; 4]); -fn main() { +fn unordered() { unsafe { - let x = i32x4([1, -2, 3, 4]); + let x = i32x4::from_array([1, -2, 3, 4]); let r: i32 = simd_reduce_add_unordered(x); assert_eq!(r, 6_i32); let r: i32 = simd_reduce_mul_unordered(x); assert_eq!(r, -24_i32); + } + + unsafe { + let x = u32x4::from_array([1, 2, 3, 4]); + let r: u32 = simd_reduce_add_unordered(x); + assert_eq!(r, 10_u32); + let r: u32 = simd_reduce_mul_unordered(x); + assert_eq!(r, 24_u32); + } + + unsafe { + let x = f32x4::from_array([1., -2., 3., 4.]); + let r: f32 = simd_reduce_add_unordered(x); + assert_eq!(r, 6_f32); + let r: f32 = simd_reduce_mul_unordered(x); + assert_eq!(r, -24_f32); + } +} + +const fn ordered() { + unsafe { + let x = i32x4::from_array([1, -2, 3, 4]); let r: i32 = simd_reduce_add_ordered(x, -1); assert_eq!(r, 5_i32); let r: i32 = simd_reduce_mul_ordered(x, -1); @@ -40,7 +51,7 @@ fn main() { let r: i32 = simd_reduce_max(x); assert_eq!(r, 4_i32); - let x = i32x4([-1, -1, -1, -1]); + let x = i32x4::from_array([-1, -1, -1, -1]); let r: i32 = simd_reduce_and(x); assert_eq!(r, -1_i32); let r: i32 = simd_reduce_or(x); @@ -48,7 +59,7 @@ fn main() { let r: i32 = simd_reduce_xor(x); assert_eq!(r, 0_i32); - let x = i32x4([-1, -1, 0, -1]); + let x = i32x4::from_array([-1, -1, 0, -1]); let r: i32 = simd_reduce_and(x); assert_eq!(r, 0_i32); let r: i32 = simd_reduce_or(x); @@ -58,11 +69,7 @@ fn main() { } unsafe { - let x = u32x4([1, 2, 3, 4]); - let r: u32 = 
simd_reduce_add_unordered(x); - assert_eq!(r, 10_u32); - let r: u32 = simd_reduce_mul_unordered(x); - assert_eq!(r, 24_u32); + let x = u32x4::from_array([1, 2, 3, 4]); let r: u32 = simd_reduce_add_ordered(x, 1); assert_eq!(r, 11_u32); let r: u32 = simd_reduce_mul_ordered(x, 2); @@ -74,7 +81,7 @@ fn main() { assert_eq!(r, 4_u32); let t = u32::MAX; - let x = u32x4([t, t, t, t]); + let x = u32x4::from_array([t, t, t, t]); let r: u32 = simd_reduce_and(x); assert_eq!(r, t); let r: u32 = simd_reduce_or(x); @@ -82,7 +89,7 @@ fn main() { let r: u32 = simd_reduce_xor(x); assert_eq!(r, 0_u32); - let x = u32x4([t, t, 0, t]); + let x = u32x4::from_array([t, t, 0, t]); let r: u32 = simd_reduce_and(x); assert_eq!(r, 0_u32); let r: u32 = simd_reduce_or(x); @@ -92,11 +99,7 @@ fn main() { } unsafe { - let x = f32x4([1., -2., 3., 4.]); - let r: f32 = simd_reduce_add_unordered(x); - assert_eq!(r, 6_f32); - let r: f32 = simd_reduce_mul_unordered(x); - assert_eq!(r, -24_f32); + let x = f32x4::from_array([1., -2., 3., 4.]); let r: f32 = simd_reduce_add_ordered(x, 0.); assert_eq!(r, 6_f32); let r: f32 = simd_reduce_mul_ordered(x, 1.); @@ -113,22 +116,28 @@ fn main() { } unsafe { - let x = b8x4([!0, !0, !0, !0]); + let x = i8x4::from_array([!0, !0, !0, !0]); let r: bool = simd_reduce_all(x); assert_eq!(r, true); let r: bool = simd_reduce_any(x); assert_eq!(r, true); - let x = b8x4([!0, !0, 0, !0]); + let x = i8x4::from_array([!0, !0, 0, !0]); let r: bool = simd_reduce_all(x); assert_eq!(r, false); let r: bool = simd_reduce_any(x); assert_eq!(r, true); - let x = b8x4([0, 0, 0, 0]); + let x = i8x4::from_array([0, 0, 0, 0]); let r: bool = simd_reduce_all(x); assert_eq!(r, false); let r: bool = simd_reduce_any(x); assert_eq!(r, false); } } + +fn main() { + unordered(); + const { ordered() }; + ordered(); +} diff --git a/tests/ui/simd/intrinsic/generic-select-pass.rs b/tests/ui/simd/intrinsic/generic-select-pass.rs index ff2d70d6a9782..ff02955f3aca6 100644 --- 
a/tests/ui/simd/intrinsic/generic-select-pass.rs +++ b/tests/ui/simd/intrinsic/generic-select-pass.rs @@ -2,9 +2,10 @@ #![allow(non_camel_case_types)] //@ ignore-emscripten //@ ignore-endian-big behavior of simd_select_bitmask is endian-specific +//@ compile-flags: --cfg minisimd_const // Test that the simd_select intrinsics produces correct results. -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -14,7 +15,7 @@ use std::intrinsics::simd::{simd_select, simd_select_bitmask}; type b8x4 = i8x4; -fn main() { +const fn select() { let m0 = b8x4::from_array([!0, !0, !0, !0]); let m1 = b8x4::from_array([0, 0, 0, 0]); let m2 = b8x4::from_array([!0, !0, 0, 0]); @@ -173,3 +174,8 @@ fn main() { assert_eq!(r, e); } } + +fn main() { + const { select() }; + select(); +} diff --git a/tests/ui/simd/masked-load-store.rs b/tests/ui/simd/masked-load-store.rs index f6682ad16725e..7098a4405c7fb 100644 --- a/tests/ui/simd/masked-load-store.rs +++ b/tests/ui/simd/masked-load-store.rs @@ -1,6 +1,7 @@ //@ ignore-backends: gcc +//@ compile-flags: --cfg minisimd_const //@ run-pass -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../auxiliary/minisimd.rs"] mod minisimd; @@ -8,7 +9,7 @@ use minisimd::*; use std::intrinsics::simd::{SimdAlign, simd_masked_load, simd_masked_store}; -fn main() { +const fn masked_load_store() { unsafe { let a = Simd::([0, 1, 2, 3]); let b_src = [4u8, 5, 6, 7]; @@ -37,3 +38,8 @@ fn main() { assert_eq!(&output, &[0, 1, 9, 6, u8::MAX]); } } + +fn main() { + const { masked_load_store() }; + masked_load_store(); +} diff --git a/tests/ui/simd/simd-bitmask-notpow2.rs b/tests/ui/simd/simd-bitmask-notpow2.rs index 991fe0d893379..1e805b008eab5 100644 --- a/tests/ui/simd/simd-bitmask-notpow2.rs +++ b/tests/ui/simd/simd-bitmask-notpow2.rs @@ -3,8 +3,9 @@ // 
This should be merged into `simd-bitmask` once that's fixed. //@ ignore-endian-big //@ ignore-backends: gcc +//@ compile-flags: --cfg minisimd_const -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../auxiliary/minisimd.rs"] mod minisimd; @@ -12,15 +13,10 @@ use minisimd::*; use std::intrinsics::simd::{simd_bitmask, simd_select_bitmask}; -fn main() { +const fn bitmask() { // Non-power-of-2 multi-byte mask. #[allow(non_camel_case_types)] type i32x10 = PackedSimd; - impl i32x10 { - fn splat(x: i32) -> Self { - Self([x; 10]) - } - } unsafe { let mask = i32x10::from_array([!0, !0, 0, !0, 0, 0, !0, 0, !0, 0]); let mask_bits = if cfg!(target_endian = "little") { 0b0101001011 } else { 0b1101001010 }; @@ -49,11 +45,6 @@ fn main() { // Test for a mask where the next multiple of 8 is not a power of two. #[allow(non_camel_case_types)] type i32x20 = PackedSimd; - impl i32x20 { - fn splat(x: i32) -> Self { - Self([x; 20]) - } - } unsafe { let mask = i32x20::from_array([ !0, !0, 0, !0, 0, @@ -91,3 +82,8 @@ fn main() { assert_eq!(selected2, mask); } } + +fn main() { + const { bitmask() }; + bitmask(); +} diff --git a/tests/ui/simd/simd-bitmask.rs b/tests/ui/simd/simd-bitmask.rs index 609dae3647b24..281a6ffb4ddd1 100644 --- a/tests/ui/simd/simd-bitmask.rs +++ b/tests/ui/simd/simd-bitmask.rs @@ -1,5 +1,6 @@ //@run-pass -#![feature(repr_simd, core_intrinsics)] +//@ compile-flags: --cfg minisimd_const +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../auxiliary/minisimd.rs"] mod minisimd; @@ -7,7 +8,7 @@ use minisimd::*; use std::intrinsics::simd::{simd_bitmask, simd_select_bitmask}; -fn main() { +const fn bitmask() { unsafe { let v = Simd::([-1, 0, -1, 0]); let i: u8 = simd_bitmask(v); @@ -68,3 +69,8 @@ fn main() { assert_eq!(r.into_array(), e); } } + +fn main() { + const { bitmask() }; + bitmask(); +}